am 74f6f8c3: Merge "metricsd: Make the unit tests pass."

* commit '74f6f8c323a99586316fb1c01452fefadb3f9b5b':
  metricsd: Make the unit tests pass.
Authored by Bertrand Simonnet on 2015-09-03 00:31:57 +00:00; committed by Android Git Automerger as commit 827f64625e.
13 changed files with 281 additions and 425 deletions

Android.mk

@@ -41,6 +41,28 @@ metrics_daemon_sources := \
   serialization/metric_sample.cc \
   serialization/serialization_utils.cc
 
+metrics_tests_sources := \
+  metrics_daemon.cc \
+  metrics_daemon_test.cc \
+  metrics_library_test.cc \
+  persistent_integer.cc \
+  persistent_integer_test.cc \
+  serialization/metric_sample.cc \
+  serialization/serialization_utils.cc \
+  serialization/serialization_utils_unittest.cc \
+  timer.cc \
+  timer_test.cc \
+  uploader/metrics_hashes.cc \
+  uploader/metrics_hashes_unittest.cc \
+  uploader/metrics_log_base.cc \
+  uploader/metrics_log_base_unittest.cc \
+  uploader/metrics_log.cc \
+  uploader/mock/sender_mock.cc \
+  uploader/sender_http.cc \
+  uploader/system_profile_cache.cc \
+  uploader/upload_service.cc \
+  uploader/upload_service_test.cc \
+
 metrics_CFLAGS := -Wall \
   -Wno-char-subscripts \
   -Wno-missing-field-initializers \
@@ -125,4 +147,25 @@ LOCAL_SRC_FILES := init.$(LOCAL_INIT_SERVICE).rc
 include $(BUILD_PREBUILT)
 endif # INITRC_TEMPLATE
 
+# Unit tests for metrics.
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := metrics_tests
+LOCAL_CFLAGS := $(metrics_CFLAGS)
+LOCAL_CPP_EXTENSION := $(metrics_cpp_extension)
+LOCAL_CPPFLAGS := $(metrics_CPPFLAGS) -Wno-sign-compare
+LOCAL_RTTI_FLAG := -frtti
+LOCAL_SHARED_LIBRARIES := $(metrics_shared_libraries) \
+  libmetrics \
+  libprotobuf-cpp-lite \
+  libchromeos-http \
+  libchromeos-dbus \
+  libcutils \
+  libdbus \
+
+LOCAL_SRC_FILES := $(metrics_tests_sources)
+LOCAL_STATIC_LIBRARIES := libBionicGtestMain libgmock metrics_daemon_protos
+
+include $(BUILD_NATIVE_TEST)
+
 endif # HOST_OS == linux

metrics_library.h

@@ -129,6 +129,9 @@ class MetricsLibrary : public MetricsLibraryInterface {
   FRIEND_TEST(MetricsLibraryTest, SendMessageToChrome);
   FRIEND_TEST(MetricsLibraryTest, SendMessageToChromeUMAEventsBadFileLocation);
 
+  void InitForTest(const std::string& uma_events_file,
+                   const std::string& consent_file);
+
   // Sets |*result| to whether or not the |mounts_file| indicates that
   // the |device_name| is currently mounted. Uses |buffer| of
   // |buffer_size| to read the file. Returns false if any error.

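Note: InitForTest is the new hook that lets a test point the library at
throwaway paths instead of the real system locations. A minimal sketch of the
intended use, assuming a gtest fixture (the file names below are illustrative,
not from this commit):

    base::ScopedTempDir temp_dir;
    ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    MetricsLibrary lib;
    // Redirect both the UMA events file and the consent file.
    lib.InitForTest(temp_dir.path().Append("events").value(),
                    temp_dir.path().Append("consent").value());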
metrics_daemon.cc

@@ -195,10 +195,10 @@ int MetricsDaemon::Run() {
 }
 
 void MetricsDaemon::RunUploaderTest() {
-  upload_service_.reset(new UploadService(new SystemProfileCache(true,
-                                                                 config_root_),
-                                          metrics_lib_,
-                                          server_));
+  upload_service_.reset(new UploadService(
+      new SystemProfileCache(true, base::FilePath(config_root_)),
+      metrics_lib_,
+      server_));
   upload_service_->Init(upload_interval_, metrics_file_);
   upload_service_->UploadEvent();
 }

metrics_daemon_test.cc

@@ -22,11 +22,12 @@
 #include <base/at_exit.h>
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/stringprintf.h>
-#include <chromeos/dbus/service_constants.h>
 #include <gtest/gtest.h>
 
+#include "constants.h"
 #include "metrics_daemon.h"
 #include "metrics_library_mock.h"
 #include "persistent_integer_mock.h"
@@ -45,7 +46,6 @@ using ::testing::Return;
 using ::testing::StrictMock;
 using chromeos_metrics::PersistentIntegerMock;
 
-static const char kFakeDiskStatsName[] = "fake-disk-stats";
 static const char kFakeDiskStatsFormat[] =
     " 1793 1788 %" PRIu64 " 105580 "
     " 196 175 %" PRIu64 " 30290 "
@@ -53,9 +53,6 @@ static const char kFakeDiskStatsFormat[] =
 static const uint64_t kFakeReadSectors[] = {80000, 100000};
 static const uint64_t kFakeWriteSectors[] = {3000, 4000};
 
-static const char kFakeVmStatsName[] = "fake-vm-stats";
-static const char kFakeScalingMaxFreqPath[] = "fake-scaling-max-freq";
-static const char kFakeCpuinfoMaxFreqPath[] = "fake-cpuinfo-max-freq";
 
 class MetricsDaemonTest : public testing::Test {
  protected:
@@ -63,78 +60,35 @@ class MetricsDaemonTest : public testing::Test {
   std::string kFakeDiskStats1;
 
   virtual void SetUp() {
+    EXPECT_TRUE(temp_dir_.CreateUniqueTempDir());
+    scaling_max_freq_path_ = temp_dir_.path().Append("scaling_max");
+    cpu_max_freq_path_ = temp_dir_.path().Append("cpu_freq_max");
+    disk_stats_path_ = temp_dir_.path().Append("disk_stats");
     kFakeDiskStats0 = base::StringPrintf(kFakeDiskStatsFormat,
                                          kFakeReadSectors[0],
                                          kFakeWriteSectors[0]);
     kFakeDiskStats1 = base::StringPrintf(kFakeDiskStatsFormat,
                                          kFakeReadSectors[1],
                                          kFakeWriteSectors[1]);
-    CreateFakeDiskStatsFile(kFakeDiskStats0.c_str());
-    CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), 10000000);
-    CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 10000000);
 
-    chromeos_metrics::PersistentInteger::SetTestingMode(true);
+    CreateFakeDiskStatsFile(kFakeDiskStats0);
+    CreateUint64ValueFile(cpu_max_freq_path_, 10000000);
+    CreateUint64ValueFile(scaling_max_freq_path_, 10000000);
+
+    chromeos_metrics::PersistentInteger::SetMetricsDirectory(
+        temp_dir_.path().value());
     daemon_.Init(true,
                  false,
+                 true,
                  &metrics_lib_,
-                 kFakeDiskStatsName,
-                 kFakeVmStatsName,
-                 kFakeScalingMaxFreqPath,
-                 kFakeCpuinfoMaxFreqPath,
+                 disk_stats_path_.value(),
+                 scaling_max_freq_path_.value(),
+                 cpu_max_freq_path_.value(),
                  base::TimeDelta::FromMinutes(30),
-                 kMetricsServer,
-                 kMetricsFilePath,
+                 metrics::kMetricsServer,
+                 metrics::kMetricsEventsFilePath,
                  "/");
-
-    // Replace original persistent values with mock ones.
-    daily_active_use_mock_ =
-        new StrictMock<PersistentIntegerMock>("1.mock");
-    daemon_.daily_active_use_.reset(daily_active_use_mock_);
-
-    kernel_crash_interval_mock_ =
-        new StrictMock<PersistentIntegerMock>("2.mock");
-    daemon_.kernel_crash_interval_.reset(kernel_crash_interval_mock_);
-
-    user_crash_interval_mock_ =
-        new StrictMock<PersistentIntegerMock>("3.mock");
-    daemon_.user_crash_interval_.reset(user_crash_interval_mock_);
-
-    unclean_shutdown_interval_mock_ =
-        new StrictMock<PersistentIntegerMock>("4.mock");
-    daemon_.unclean_shutdown_interval_.reset(unclean_shutdown_interval_mock_);
-  }
-
-  virtual void TearDown() {
-    EXPECT_EQ(0, unlink(kFakeDiskStatsName));
-    EXPECT_EQ(0, unlink(kFakeScalingMaxFreqPath));
-    EXPECT_EQ(0, unlink(kFakeCpuinfoMaxFreqPath));
-  }
-
-  // Adds active use aggregation counters update expectations that the
-  // specified count will be added.
-  void ExpectActiveUseUpdate(int count) {
-    EXPECT_CALL(*daily_active_use_mock_, Add(count))
-        .Times(1)
-        .RetiresOnSaturation();
-    EXPECT_CALL(*kernel_crash_interval_mock_, Add(count))
-        .Times(1)
-        .RetiresOnSaturation();
-    EXPECT_CALL(*user_crash_interval_mock_, Add(count))
-        .Times(1)
-        .RetiresOnSaturation();
-  }
-
-  // As above, but ignore values of counter updates.
-  void IgnoreActiveUseUpdate() {
-    EXPECT_CALL(*daily_active_use_mock_, Add(_))
-        .Times(1)
-        .RetiresOnSaturation();
-    EXPECT_CALL(*kernel_crash_interval_mock_, Add(_))
-        .Times(1)
-        .RetiresOnSaturation();
-    EXPECT_CALL(*user_crash_interval_mock_, Add(_))
-        .Times(1)
-        .RetiresOnSaturation();
   }
 
   // Adds a metrics library mock expectation that the specified metric
@@ -177,19 +131,15 @@ class MetricsDaemonTest : public testing::Test {
   }
 
   // Creates or overwrites an input file containing fake disk stats.
-  void CreateFakeDiskStatsFile(const char* fake_stats) {
-    if (unlink(kFakeDiskStatsName) < 0) {
-      EXPECT_EQ(errno, ENOENT);
-    }
-    FILE* f = fopen(kFakeDiskStatsName, "w");
-    EXPECT_EQ(1, fwrite(fake_stats, strlen(fake_stats), 1, f));
-    EXPECT_EQ(0, fclose(f));
+  void CreateFakeDiskStatsFile(const string& fake_stats) {
+    EXPECT_EQ(base::WriteFile(disk_stats_path_,
                              fake_stats.data(), fake_stats.size()),
+              fake_stats.size());
   }
 
   // Creates or overwrites the file in |path| so that it contains the printable
   // representation of |value|.
   void CreateUint64ValueFile(const base::FilePath& path, uint64_t value) {
-    base::DeleteFile(path, false);
     std::string value_string = base::Uint64ToString(value);
     ASSERT_EQ(value_string.length(),
               base::WriteFile(path, value_string.c_str(),
@@ -199,29 +149,19 @@ class MetricsDaemonTest : public testing::Test {
   // The MetricsDaemon under test.
   MetricsDaemon daemon_;
 
+  // Temporary directory used for tests.
+  base::ScopedTempDir temp_dir_;
+
+  // Path for the fake files.
+  base::FilePath scaling_max_freq_path_;
+  base::FilePath cpu_max_freq_path_;
+  base::FilePath disk_stats_path_;
+
   // Mocks. They are strict mock so that all unexpected
   // calls are marked as failures.
   StrictMock<MetricsLibraryMock> metrics_lib_;
-  StrictMock<PersistentIntegerMock>* daily_active_use_mock_;
-  StrictMock<PersistentIntegerMock>* kernel_crash_interval_mock_;
-  StrictMock<PersistentIntegerMock>* user_crash_interval_mock_;
-  StrictMock<PersistentIntegerMock>* unclean_shutdown_interval_mock_;
 };
 
-TEST_F(MetricsDaemonTest, CheckSystemCrash) {
-  static const char kKernelCrashDetected[] = "test-kernel-crash-detected";
-  EXPECT_FALSE(daemon_.CheckSystemCrash(kKernelCrashDetected));
-
-  base::FilePath crash_detected(kKernelCrashDetected);
-  base::WriteFile(crash_detected, "", 0);
-  EXPECT_TRUE(base::PathExists(crash_detected));
-  EXPECT_TRUE(daemon_.CheckSystemCrash(kKernelCrashDetected));
-  EXPECT_FALSE(base::PathExists(crash_detected));
-
-  EXPECT_FALSE(daemon_.CheckSystemCrash(kKernelCrashDetected));
-  EXPECT_FALSE(base::PathExists(crash_detected));
-  base::DeleteFile(crash_detected, false);
-}
-
 TEST_F(MetricsDaemonTest, MessageFilter) {
   // Ignore calls to SendToUMA.
   EXPECT_CALL(metrics_lib_, SendToUMA(_, _, _, _, _)).Times(AnyNumber());
@@ -232,7 +172,6 @@ TEST_F(MetricsDaemonTest, MessageFilter) {
   EXPECT_EQ(DBUS_HANDLER_RESULT_NOT_YET_HANDLED, res);
   DeleteDBusMessage(msg);
 
-  IgnoreActiveUseUpdate();
   vector<string> signal_args;
   msg = NewDBusSignalString("/",
                             "org.chromium.CrashReporter",
@@ -260,25 +199,6 @@ TEST_F(MetricsDaemonTest, SendSample) {
              /* min */ 1, /* max */ 100, /* buckets */ 50);
 }
 
-TEST_F(MetricsDaemonTest, ReportDiskStats) {
-  uint64_t read_sectors_now, write_sectors_now;
-  CreateFakeDiskStatsFile(kFakeDiskStats1.c_str());
-  daemon_.DiskStatsReadStats(&read_sectors_now, &write_sectors_now);
-  EXPECT_EQ(read_sectors_now, kFakeReadSectors[1]);
-  EXPECT_EQ(write_sectors_now, kFakeWriteSectors[1]);
-
-  MetricsDaemon::StatsState s_state = daemon_.stats_state_;
-  EXPECT_CALL(metrics_lib_,
-              SendToUMA(_, (kFakeReadSectors[1] - kFakeReadSectors[0]) / 30,
-                        _, _, _));
-  EXPECT_CALL(metrics_lib_,
-              SendToUMA(_, (kFakeWriteSectors[1] - kFakeWriteSectors[0]) / 30,
-                        _, _, _));
-  EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, _, _));  // SendCpuThrottleMetrics
-  daemon_.StatsCallback();
-  EXPECT_TRUE(s_state != daemon_.stats_state_);
-}
-
 TEST_F(MetricsDaemonTest, ProcessMeminfo) {
   string meminfo =
       "MemTotal: 2000000 kB\nMemFree: 500000 kB\n"
@@ -337,24 +257,24 @@ TEST_F(MetricsDaemonTest, ReadFreqToInt) {
   const int fake_max_freq = 2000000;
   int scaled_freq = 0;
   int max_freq = 0;
-  CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath),
-                        fake_scaled_freq);
-  CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), fake_max_freq);
+  CreateUint64ValueFile(scaling_max_freq_path_, fake_scaled_freq);
+  CreateUint64ValueFile(cpu_max_freq_path_, fake_max_freq);
   EXPECT_TRUE(daemon_.testing_);
-  EXPECT_TRUE(daemon_.ReadFreqToInt(kFakeScalingMaxFreqPath, &scaled_freq));
-  EXPECT_TRUE(daemon_.ReadFreqToInt(kFakeCpuinfoMaxFreqPath, &max_freq));
+  EXPECT_TRUE(daemon_.ReadFreqToInt(scaling_max_freq_path_.value(),
+                                    &scaled_freq));
+  EXPECT_TRUE(daemon_.ReadFreqToInt(cpu_max_freq_path_.value(), &max_freq));
   EXPECT_EQ(fake_scaled_freq, scaled_freq);
   EXPECT_EQ(fake_max_freq, max_freq);
 }
 
 TEST_F(MetricsDaemonTest, SendCpuThrottleMetrics) {
-  CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), 2001000);
+  CreateUint64ValueFile(cpu_max_freq_path_, 2001000);
   // Test the 101% and 100% cases.
-  CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 2001000);
+  CreateUint64ValueFile(scaling_max_freq_path_, 2001000);
   EXPECT_TRUE(daemon_.testing_);
   EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, 101, 101));
   daemon_.SendCpuThrottleMetrics();
-  CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 2000000);
+  CreateUint64ValueFile(scaling_max_freq_path_, 2000000);
  EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, 100, 101));
   daemon_.SendCpuThrottleMetrics();
 }
@@ -370,12 +290,14 @@ TEST_F(MetricsDaemonTest, SendZramMetrics) {
   const uint64_t page_size = 4096;
   const uint64_t zero_pages = 10 * 1000 * 1000 / page_size;
 
-  CreateUint64ValueFile(base::FilePath(MetricsDaemon::kComprDataSizeName),
-                        compr_data_size);
-  CreateUint64ValueFile(base::FilePath(MetricsDaemon::kOrigDataSizeName),
-                        orig_data_size);
-  CreateUint64ValueFile(base::FilePath(MetricsDaemon::kZeroPagesName),
-                        zero_pages);
+  CreateUint64ValueFile(
+      temp_dir_.path().Append(MetricsDaemon::kComprDataSizeName),
+      compr_data_size);
+  CreateUint64ValueFile(
+      temp_dir_.path().Append(MetricsDaemon::kOrigDataSizeName),
+      orig_data_size);
+  CreateUint64ValueFile(
+      temp_dir_.path().Append(MetricsDaemon::kZeroPagesName), zero_pages);
 
   const uint64_t real_orig_size = orig_data_size + zero_pages * page_size;
   const uint64_t zero_ratio_percent =
@@ -390,11 +312,5 @@ TEST_F(MetricsDaemonTest, SendZramMetrics) {
   EXPECT_CALL(metrics_lib_, SendToUMA(_, zero_pages, _, _, _));
   EXPECT_CALL(metrics_lib_, SendToUMA(_, zero_ratio_percent, _, _, _));
 
-  EXPECT_TRUE(daemon_.ReportZram(base::FilePath(".")));
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  EXPECT_TRUE(daemon_.ReportZram(temp_dir_.path()));
 }

metrics_library.cc

@@ -140,6 +140,12 @@ void MetricsLibrary::Init() {
   uma_events_file_ = metrics::kMetricsEventsFilePath;
 }
 
+void MetricsLibrary::InitForTest(const std::string& uma_events_file,
+                                 const std::string& consent_file) {
+  uma_events_file_ = uma_events_file;
+  consent_file_ = consent_file;
+}
+
 bool MetricsLibrary::SendToUMA(const std::string& name,
                                int sample,
                                int min,

metrics_library_test.cc

@@ -14,130 +14,52 @@
  * limitations under the License.
  */
 
-#include <cstring>
-
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
-#include <policy/mock_device_policy.h>
-#include <policy/libpolicy.h>
 
 #include "metrics/c_metrics_library.h"
 #include "metrics/metrics_library.h"
 
-using base::FilePath;
-using ::testing::_;
-using ::testing::Return;
-using ::testing::AnyNumber;
-
-static const FilePath kTestUMAEventsFile("test-uma-events");
-static const char kTestMounts[] = "test-mounts";
-
-ACTION_P(SetMetricsPolicy, enabled) {
-  *arg0 = enabled;
-  return true;
-}
-
 class MetricsLibraryTest : public testing::Test {
  protected:
   virtual void SetUp() {
-    EXPECT_TRUE(lib_.uma_events_file_.empty());
-    lib_.Init();
-    EXPECT_FALSE(lib_.uma_events_file_.empty());
-    lib_.uma_events_file_ = kTestUMAEventsFile.value();
-    EXPECT_EQ(0, WriteFile(kTestUMAEventsFile, "", 0));
-    device_policy_ = new policy::MockDevicePolicy();
-    EXPECT_CALL(*device_policy_, LoadPolicy())
-        .Times(AnyNumber())
-        .WillRepeatedly(Return(true));
-    EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-        .Times(AnyNumber())
-        .WillRepeatedly(SetMetricsPolicy(true));
-    provider_ = new policy::PolicyProvider(device_policy_);
-    lib_.SetPolicyProvider(provider_);
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    consent_file_ = temp_dir_.path().Append("consent");
+    uma_events_file_ = temp_dir_.path().Append("events");
+    lib_.InitForTest(uma_events_file_.value(), consent_file_.value());
+    EXPECT_EQ(0, WriteFile(uma_events_file_, "", 0));
     // Defeat metrics enabled caching between tests.
     lib_.cached_enabled_time_ = 0;
   }
 
-  virtual void TearDown() {
-    base::DeleteFile(FilePath(kTestMounts), false);
-    base::DeleteFile(kTestUMAEventsFile, false);
+  void SetMetricsConsent(bool enabled) {
+    if (enabled) {
+      ASSERT_EQ(base::WriteFile(consent_file_, "", 0), 0);
+    } else {
+      ASSERT_TRUE(base::DeleteFile(consent_file_, false));
+    }
   }
 
   void VerifyEnabledCacheHit(bool to_value);
   void VerifyEnabledCacheEviction(bool to_value);
 
   MetricsLibrary lib_;
-  policy::MockDevicePolicy* device_policy_;
-  policy::PolicyProvider* provider_;
+  base::ScopedTempDir temp_dir_;
+  base::FilePath consent_file_;
+  base::FilePath uma_events_file_;
 };
 
-TEST_F(MetricsLibraryTest, IsDeviceMounted) {
-  static const char kTestContents[] =
-      "0123456789abcde 0123456789abcde\nguestfs foo bar\n";
-  char buffer[1024];
-  int block_sizes[] = { 1, 2, 3, 4, 5, 6, 8, 12, 14, 16, 32, 1024 };
-  bool result;
-  EXPECT_FALSE(lib_.IsDeviceMounted("guestfs",
-                                    "nonexistent",
-                                    buffer,
-                                    1,
-                                    &result));
-  ASSERT_TRUE(base::WriteFile(base::FilePath(kTestMounts),
-                              kTestContents,
-                              strlen(kTestContents)));
-  EXPECT_FALSE(lib_.IsDeviceMounted("guestfs",
-                                    kTestMounts,
-                                    buffer,
-                                    0,
-                                    &result));
-  for (size_t i = 0; i < arraysize(block_sizes); ++i) {
-    EXPECT_TRUE(lib_.IsDeviceMounted("0123456789abcde",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_TRUE(result);
-    EXPECT_TRUE(lib_.IsDeviceMounted("guestfs",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_TRUE(result);
-    EXPECT_TRUE(lib_.IsDeviceMounted("0123456",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_FALSE(result);
-    EXPECT_TRUE(lib_.IsDeviceMounted("9abcde",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_FALSE(result);
-    EXPECT_TRUE(lib_.IsDeviceMounted("foo",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_FALSE(result);
-    EXPECT_TRUE(lib_.IsDeviceMounted("bar",
-                                     kTestMounts,
-                                     buffer,
-                                     block_sizes[i],
-                                     &result));
-    EXPECT_FALSE(result);
-  }
-}
-
 TEST_F(MetricsLibraryTest, AreMetricsEnabledFalse) {
-  EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-      .WillOnce(SetMetricsPolicy(false));
+  SetMetricsConsent(false);
   EXPECT_FALSE(lib_.AreMetricsEnabled());
 }
 
 TEST_F(MetricsLibraryTest, AreMetricsEnabledTrue) {
+  SetMetricsConsent(true);
   EXPECT_TRUE(lib_.AreMetricsEnabled());
 }
 
@@ -146,12 +68,12 @@ void MetricsLibraryTest::VerifyEnabledCacheHit(bool to_value) {
   // times in a row.
   for (int i = 0; i < 100; ++i) {
     lib_.cached_enabled_time_ = 0;
-    EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-        .WillOnce(SetMetricsPolicy(!to_value));
-    ASSERT_EQ(!to_value, lib_.AreMetricsEnabled());
-    ON_CALL(*device_policy_, GetMetricsEnabled(_))
-        .WillByDefault(SetMetricsPolicy(to_value));
-    if (lib_.AreMetricsEnabled() == !to_value)
+    SetMetricsConsent(to_value);
+    lib_.AreMetricsEnabled();
+    // If we check the metrics status twice in a row, we use the cached value
+    // the second time.
+    SetMetricsConsent(!to_value);
+    if (lib_.AreMetricsEnabled() == to_value)
       return;
   }
   ADD_FAILURE() << "Did not see evidence of caching";
@@ -159,14 +81,12 @@ void MetricsLibraryTest::VerifyEnabledCacheHit(bool to_value) {
 
 void MetricsLibraryTest::VerifyEnabledCacheEviction(bool to_value) {
   lib_.cached_enabled_time_ = 0;
-  EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-      .WillOnce(SetMetricsPolicy(!to_value));
+  SetMetricsConsent(!to_value);
   ASSERT_EQ(!to_value, lib_.AreMetricsEnabled());
-  EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-      .WillOnce(SetMetricsPolicy(to_value));
-  ASSERT_LT(abs(static_cast<int>(time(nullptr) - lib_.cached_enabled_time_)),
-            5);
-  // Sleep one second (or cheat to be faster).
+
+  SetMetricsConsent(to_value);
+  // Sleep one second (or cheat to be faster) and check that we are not using
+  // the cached value.
   --lib_.cached_enabled_time_;
   ASSERT_EQ(to_value, lib_.AreMetricsEnabled());
 }
@@ -177,50 +97,3 @@ TEST_F(MetricsLibraryTest, AreMetricsEnabledCaching) {
   VerifyEnabledCacheEviction(false);
   VerifyEnabledCacheEviction(true);
 }
-
-class CMetricsLibraryTest : public testing::Test {
- protected:
-  virtual void SetUp() {
-    lib_ = CMetricsLibraryNew();
-    MetricsLibrary& ml = *reinterpret_cast<MetricsLibrary*>(lib_);
-    EXPECT_TRUE(ml.uma_events_file_.empty());
-    CMetricsLibraryInit(lib_);
-    EXPECT_FALSE(ml.uma_events_file_.empty());
-    ml.uma_events_file_ = kTestUMAEventsFile.value();
-    EXPECT_EQ(0, WriteFile(kTestUMAEventsFile, "", 0));
-    device_policy_ = new policy::MockDevicePolicy();
-    EXPECT_CALL(*device_policy_, LoadPolicy())
-        .Times(AnyNumber())
-        .WillRepeatedly(Return(true));
-    EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-        .Times(AnyNumber())
-        .WillRepeatedly(SetMetricsPolicy(true));
-    provider_ = new policy::PolicyProvider(device_policy_);
-    ml.SetPolicyProvider(provider_);
-    reinterpret_cast<MetricsLibrary*>(lib_)->cached_enabled_time_ = 0;
-  }
-
-  virtual void TearDown() {
-    CMetricsLibraryDelete(lib_);
-    base::DeleteFile(kTestUMAEventsFile, false);
-  }
-
-  CMetricsLibrary lib_;
-  policy::MockDevicePolicy* device_policy_;
-  policy::PolicyProvider* provider_;
-};
-
-TEST_F(CMetricsLibraryTest, AreMetricsEnabledFalse) {
-  EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
-      .WillOnce(SetMetricsPolicy(false));
-  EXPECT_FALSE(CMetricsLibraryAreMetricsEnabled(lib_));
-}
-
-TEST_F(CMetricsLibraryTest, AreMetricsEnabledTrue) {
-  EXPECT_TRUE(CMetricsLibraryAreMetricsEnabled(lib_));
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}

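Note: with the policy-provider mocks gone, consent is simply the existence of
the consent file; the SetMetricsConsent helper above toggles exactly that. A
minimal sketch of the resulting behavior, assuming the fixture above (|lib_|
and |consent_file_| come from it):

    // Opt in: an empty consent file is enough.
    ASSERT_EQ(0, base::WriteFile(consent_file_, "", 0));
    lib_.cached_enabled_time_ = 0;  // defeat the enabled-state cache
    EXPECT_TRUE(lib_.AreMetricsEnabled());

    // Opt out: remove the file.
    ASSERT_TRUE(base::DeleteFile(consent_file_, false));
    lib_.cached_enabled_time_ = 0;
    EXPECT_FALSE(lib_.AreMetricsEnabled());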
persistent_integer.cc

@@ -28,18 +28,14 @@
 namespace chromeos_metrics {
 
 // Static class member instantiation.
-bool PersistentInteger::testing_ = false;
+std::string PersistentInteger::metrics_directory_ = metrics::kMetricsDirectory;
 
 PersistentInteger::PersistentInteger(const std::string& name) :
       value_(0),
       version_(kVersion),
       name_(name),
       synced_(false) {
-  if (testing_) {
-    backing_file_name_ = name_;
-  } else {
-    backing_file_name_ = metrics::kMetricsDirectory + name_;
-  }
+  backing_file_name_ = metrics_directory_ + name_;
 }
 
 PersistentInteger::~PersistentInteger() {}
@@ -100,8 +96,8 @@ bool PersistentInteger::Read() {
   return read_succeeded;
 }
 
-void PersistentInteger::SetTestingMode(bool testing) {
-  testing_ = testing;
+void PersistentInteger::SetMetricsDirectory(const std::string& directory) {
+  metrics_directory_ = directory;
 }

persistent_integer.h

@@ -50,10 +50,9 @@ class PersistentInteger {
   // Virtual only because of mock.
   virtual void Add(int64_t x);
 
-  // After calling with |testing| = true, changes some behavior for the purpose
-  // of testing. For instance: instances created while testing use the current
-  // directory for the backing files.
-  static void SetTestingMode(bool testing);
+  // Sets the directory path for all persistent integers.
+  // This is used in unittests to change where the counters are stored.
+  static void SetMetricsDirectory(const std::string& directory);
 
  private:
   static const int kVersion = 1001;
@@ -70,8 +69,8 @@ class PersistentInteger {
   int32_t version_;
   std::string name_;
   std::string backing_file_name_;
+  static std::string metrics_directory_;
   bool synced_;
-  static bool testing_;
 };
 
 }  // namespace chromeos_metrics

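Note: tests now redirect every PersistentInteger's backing file by directory
instead of flipping a global testing flag; the directory string is prepended
verbatim to the counter name (backing_file_name_ = metrics_directory_ + name_).
A minimal sketch of the new pattern (the counter name is illustrative):

    base::ScopedTempDir temp_dir;
    ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    chromeos_metrics::PersistentInteger::SetMetricsDirectory(
        temp_dir.path().value());
    chromeos_metrics::PersistentInteger counter("test.counter");
    counter.Add(42);  // persisted under the temporary directory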
persistent_integer_test.cc

@@ -19,6 +19,7 @@
 #include <base/compiler_specific.h>
 #include <base/files/file_enumerator.h>
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 
 #include "persistent_integer.h"
 
@@ -30,7 +31,9 @@ using chromeos_metrics::PersistentInteger;
 class PersistentIntegerTest : public testing::Test {
   void SetUp() override {
     // Set testing mode.
-    chromeos_metrics::PersistentInteger::SetTestingMode(true);
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    chromeos_metrics::PersistentInteger::SetMetricsDirectory(
+        temp_dir_.path().value());
   }
 
   void TearDown() override {
@@ -45,6 +48,8 @@ class PersistentIntegerTest : public testing::Test {
       base::DeleteFile(name, false);
     }
   }
+
+  base::ScopedTempDir temp_dir_;
 };
 
 TEST_F(PersistentIntegerTest, BasicChecks) {
@@ -71,8 +76,3 @@ TEST_F(PersistentIntegerTest, BasicChecks) {
   pi.reset(new PersistentInteger(kBackingFileName));
   EXPECT_EQ(0, pi->Get());
 }
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}

uploader/mock/mock_system_profile_setter.h

@@ -26,7 +26,9 @@ class ChromeUserMetricsExtension;
 // Mock profile setter used for testing.
 class MockSystemProfileSetter : public SystemProfileSetter {
  public:
-  void Populate(metrics::ChromeUserMetricsExtension* profile_proto) override {}
+  bool Populate(metrics::ChromeUserMetricsExtension* profile_proto) override {
+    return true;
+  }
 };
 
 #endif  // METRICS_UPLOADER_MOCK_MOCK_SYSTEM_PROFILE_SETTER_H_

uploader/system_profile_cache.cc

@@ -61,7 +61,7 @@ SystemProfileCache::SystemProfileCache()
 }
 
 SystemProfileCache::SystemProfileCache(bool testing,
-                                       const std::string& config_root)
+                                       const base::FilePath& config_root)
     : initialized_(false),
       testing_(testing),
       config_root_(config_root),
@@ -73,9 +73,7 @@ bool SystemProfileCache::Initialize() {
   CHECK(!initialized_)
      << "this should be called only once in the metrics_daemon lifetime.";
 
-  char property_value[PROPERTY_VALUE_MAX];
-  property_get(metrics::kBuildTargetIdProperty, property_value, "");
-  profile_.build_target_id = std::string(property_value);
+  profile_.build_target_id = GetProperty(metrics::kBuildTargetIdProperty);
 
   if (profile_.build_target_id.empty()) {
     LOG(ERROR) << "System property " << metrics::kBuildTargetIdProperty
@@ -83,11 +81,8 @@
     return false;
   }
 
-  property_get(metrics::kChannelProperty, property_value, "");
-  std::string channel(property_value);
-
-  property_get(metrics::kProductVersionProperty, property_value, "");
-  profile_.version = std::string(property_value);
+  std::string channel = GetProperty(metrics::kChannelProperty);
+  profile_.version = GetProperty(metrics::kProductVersionProperty);
 
   if (channel.empty() || profile_.version.empty()) {
     // If the channel or version is missing, the image is not official.
@@ -157,6 +152,18 @@ std::string SystemProfileCache::GetPersistentGUID(
   return guid;
 }
 
+std::string SystemProfileCache::GetProperty(const std::string& name) {
+  if (testing_) {
+    std::string content;
+    base::ReadFileToString(config_root_.Append(name), &content);
+    return content;
+  } else {
+    char value[PROPERTY_VALUE_MAX];
+    property_get(name.data(), value, "");
+    return std::string(value);
+  }
+}
+
 metrics::SystemProfileProto_Channel SystemProfileCache::ProtoChannelFromString(
     const std::string& channel) {
   if (channel == "stable") {

uploader/system_profile_cache.h

@@ -22,6 +22,7 @@
 #include <string>
 
 #include "base/compiler_specific.h"
+#include "base/files/file_path.h"
 #include "base/gtest_prod_util.h"
 #include "base/memory/scoped_ptr.h"
 #include "persistent_integer.h"
@@ -49,7 +50,7 @@ class SystemProfileCache : public SystemProfileSetter {
  public:
   SystemProfileCache();
 
-  SystemProfileCache(bool testing, const std::string& config_root);
+  SystemProfileCache(bool testing, const base::FilePath& config_root);
 
   // Populates the ProfileSystem protobuf with system information.
   bool Populate(metrics::ChromeUserMetricsExtension* metrics_proto) override;
@@ -75,9 +76,14 @@ class SystemProfileCache : public SystemProfileSetter {
   // Initializes |profile_| only if it has not been yet initialized.
   bool InitializeOrCheck();
 
+  // Gets a system property as a string.
+  // When |testing_| is true, reads the value from |config_root_|/|name|
+  // instead.
+  std::string GetProperty(const std::string& name);
+
   bool initialized_;
   bool testing_;
-  std::string config_root_;
+  base::FilePath config_root_;
   scoped_ptr<chromeos_metrics::PersistentInteger> session_id_;
   SystemProfile profile_;
 };

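Note: in testing mode GetProperty reads |config_root_|/|name| from disk instead
of calling property_get, so a test can seed fake system properties by writing
plain files into a temporary config root (this is what the SetTestingProperty
helper in upload_service_test.cc below does). A minimal sketch, with an
illustrative value:

    base::ScopedTempDir dir;
    ASSERT_TRUE(dir.CreateUniqueTempDir());
    // One file per property, named after the property key.
    ASSERT_EQ(4, base::WriteFile(dir.path().Append(metrics::kChannelProperty),
                                 "beta", 4));
    SystemProfileCache cache(true /* testing */, dir.path());
    // cache.Initialize() now sees channel == "beta".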
uploader/upload_service_test.cc

@@ -16,11 +16,13 @@
 
 #include <gtest/gtest.h>
 
-#include "base/at_exit.h"
-#include "base/files/file_util.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/logging.h"
-#include "base/sys_info.h"
+#include <base/at_exit.h>
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <base/logging.h>
+#include <base/sys_info.h>
+
+#include "constants.h"
 #include "metrics_library_mock.h"
 #include "serialization/metric_sample.h"
 #include "uploader/metrics_log.h"
@@ -34,35 +36,35 @@
 class UploadServiceTest : public testing::Test {
  protected:
-  UploadServiceTest()
-      : cache_(true, "/"),
-        upload_service_(new MockSystemProfileSetter(), &metrics_lib_,
-                        kMetricsServer, true),
-        exit_manager_(new base::AtExitManager()) {
-    sender_ = new SenderMock;
-    upload_service_.sender_.reset(sender_);
-    upload_service_.Init(base::TimeDelta::FromMinutes(30), kMetricsFilePath);
-  }
-
   virtual void SetUp() {
     CHECK(dir_.CreateUniqueTempDir());
-    upload_service_.GatherHistograms();
-    upload_service_.Reset();
-    sender_->Reset();
+    upload_service_.reset(new UploadService(new MockSystemProfileSetter(),
+                                            &metrics_lib_, "", true));
 
-    chromeos_metrics::PersistentInteger::SetTestingMode(true);
-    cache_.session_id_.reset(new chromeos_metrics::PersistentInteger(
-        dir_.path().Append("session_id").value()));
+    upload_service_->sender_.reset(new SenderMock);
+    event_file_ = dir_.path().Append("event");
+    upload_service_->Init(base::TimeDelta::FromMinutes(30), event_file_.value());
+    upload_service_->GatherHistograms();
+    upload_service_->Reset();
+
+    chromeos_metrics::PersistentInteger::SetMetricsDirectory(
+        dir_.path().value());
   }
 
   scoped_ptr<metrics::MetricSample> Crash(const std::string& name) {
     return metrics::MetricSample::CrashSample(name);
   }
 
+  void SetTestingProperty(const std::string& name, const std::string& value) {
+    ASSERT_EQ(
+        value.size(),
+        base::WriteFile(dir_.path().Append(name), value.data(), value.size()));
+  }
+
+  base::FilePath event_file_;
+
   base::ScopedTempDir dir_;
 
-  SenderMock* sender_;
-
-  SystemProfileCache cache_;
-
-  UploadService upload_service_;
+  scoped_ptr<UploadService> upload_service_;
 
   MetricsLibraryMock metrics_lib_;
 
   scoped_ptr<base::AtExitManager> exit_manager_;
@@ -70,18 +72,18 @@ class UploadServiceTest : public testing::Test {
 
 // Tests that the right crash increments a values.
 TEST_F(UploadServiceTest, LogUserCrash) {
-  upload_service_.AddSample(*Crash("user").get());
+  upload_service_->AddSample(*Crash("user").get());
 
-  MetricsLog* log = upload_service_.current_log_.get();
+  MetricsLog* log = upload_service_->current_log_.get();
   metrics::ChromeUserMetricsExtension* proto = log->uma_proto();
 
   EXPECT_EQ(1, proto->system_profile().stability().other_user_crash_count());
 }
 
 TEST_F(UploadServiceTest, LogUncleanShutdown) {
-  upload_service_.AddSample(*Crash("uncleanshutdown"));
+  upload_service_->AddSample(*Crash("uncleanshutdown"));
 
-  EXPECT_EQ(1, upload_service_.current_log_
+  EXPECT_EQ(1, upload_service_->current_log_
                    ->uma_proto()
                    ->system_profile()
                    .stability()
@@ -89,9 +91,9 @@ TEST_F(UploadServiceTest, LogUncleanShutdown) {
 }
 
 TEST_F(UploadServiceTest, LogKernelCrash) {
-  upload_service_.AddSample(*Crash("kernel"));
+  upload_service_->AddSample(*Crash("kernel"));
 
-  EXPECT_EQ(1, upload_service_.current_log_
+  EXPECT_EQ(1, upload_service_->current_log_
                    ->uma_proto()
                   ->system_profile()
                    .stability()
@@ -99,47 +101,56 @@ TEST_F(UploadServiceTest, LogKernelCrash) {
 }
 
 TEST_F(UploadServiceTest, UnknownCrashIgnored) {
-  upload_service_.AddSample(*Crash("foo"));
+  upload_service_->AddSample(*Crash("foo"));
 
   // The log should be empty.
-  EXPECT_FALSE(upload_service_.current_log_);
+  EXPECT_FALSE(upload_service_->current_log_);
 }
 
 TEST_F(UploadServiceTest, FailedSendAreRetried) {
-  sender_->set_should_succeed(false);
+  SenderMock* sender = new SenderMock();
+  upload_service_->sender_.reset(sender);
+  sender->set_should_succeed(false);
 
-  upload_service_.AddSample(*Crash("user"));
-  upload_service_.UploadEvent();
-  EXPECT_EQ(1, sender_->send_call_count());
-  std::string sent_string = sender_->last_message();
+  upload_service_->AddSample(*Crash("user"));
+  upload_service_->UploadEvent();
+  EXPECT_EQ(1, sender->send_call_count());
+  std::string sent_string = sender->last_message();
 
-  upload_service_.UploadEvent();
-  EXPECT_EQ(2, sender_->send_call_count());
-  EXPECT_EQ(sent_string, sender_->last_message());
+  upload_service_->UploadEvent();
+  EXPECT_EQ(2, sender->send_call_count());
+  EXPECT_EQ(sent_string, sender->last_message());
 }
 
 TEST_F(UploadServiceTest, DiscardLogsAfterTooManyFailedUpload) {
-  sender_->set_should_succeed(false);
-  upload_service_.AddSample(*Crash("user"));
+  SenderMock* sender = new SenderMock();
+  upload_service_->sender_.reset(sender);
+  sender->set_should_succeed(false);
+  upload_service_->AddSample(*Crash("user"));
 
   for (int i = 0; i < UploadService::kMaxFailedUpload; i++) {
-    upload_service_.UploadEvent();
+    upload_service_->UploadEvent();
   }
-  EXPECT_TRUE(upload_service_.staged_log_);
-  upload_service_.UploadEvent();
-  EXPECT_FALSE(upload_service_.staged_log_);
+  EXPECT_TRUE(upload_service_->staged_log_);
+  upload_service_->UploadEvent();
+  EXPECT_FALSE(upload_service_->staged_log_);
 }
 
 TEST_F(UploadServiceTest, EmptyLogsAreNotSent) {
-  upload_service_.UploadEvent();
-  EXPECT_FALSE(upload_service_.current_log_);
-  EXPECT_EQ(0, sender_->send_call_count());
+  SenderMock* sender = new SenderMock();
+  upload_service_->sender_.reset(sender);
+  upload_service_->UploadEvent();
+  EXPECT_FALSE(upload_service_->current_log_);
+  EXPECT_EQ(0, sender->send_call_count());
 }
 
 TEST_F(UploadServiceTest, LogEmptyByDefault) {
   UploadService upload_service(new MockSystemProfileSetter(), &metrics_lib_,
-                               kMetricsServer);
+                               "");
 
   // current_log_ should be initialized later as it needs AtExitManager to exit
   // in order to gather system information from SysInfo.
@@ -147,39 +158,42 @@ TEST_F(UploadServiceTest, LogEmptyByDefault) {
 }
 
 TEST_F(UploadServiceTest, CanSendMultipleTimes) {
-  upload_service_.AddSample(*Crash("user"));
-  upload_service_.UploadEvent();
+  SenderMock* sender = new SenderMock();
+  upload_service_->sender_.reset(sender);
 
-  std::string first_message = sender_->last_message();
+  upload_service_->AddSample(*Crash("user"));
+  upload_service_->UploadEvent();
 
-  upload_service_.AddSample(*Crash("kernel"));
-  upload_service_.UploadEvent();
+  std::string first_message = sender->last_message();
 
-  EXPECT_NE(first_message, sender_->last_message());
+  upload_service_->AddSample(*Crash("kernel"));
+  upload_service_->UploadEvent();
+
+  EXPECT_NE(first_message, sender->last_message());
 }
 
 TEST_F(UploadServiceTest, LogEmptyAfterUpload) {
-  upload_service_.AddSample(*Crash("user"));
+  upload_service_->AddSample(*Crash("user"));
 
-  EXPECT_TRUE(upload_service_.current_log_);
+  EXPECT_TRUE(upload_service_->current_log_);
 
-  upload_service_.UploadEvent();
-  EXPECT_FALSE(upload_service_.current_log_);
+  upload_service_->UploadEvent();
+  EXPECT_FALSE(upload_service_->current_log_);
 }
 
 TEST_F(UploadServiceTest, LogContainsAggregatedValues) {
   scoped_ptr<metrics::MetricSample> histogram =
       metrics::MetricSample::HistogramSample("foo", 10, 0, 42, 10);
-  upload_service_.AddSample(*histogram.get());
+  upload_service_->AddSample(*histogram.get());
 
   scoped_ptr<metrics::MetricSample> histogram2 =
      metrics::MetricSample::HistogramSample("foo", 11, 0, 42, 10);
-  upload_service_.AddSample(*histogram2.get());
+  upload_service_->AddSample(*histogram2.get());
 
-  upload_service_.GatherHistograms();
+  upload_service_->GatherHistograms();
 
   metrics::ChromeUserMetricsExtension* proto =
-      upload_service_.current_log_->uma_proto();
+      upload_service_->current_log_->uma_proto();
   EXPECT_EQ(1, proto->histogram_event().size());
 }
 
@@ -190,46 +204,41 @@ TEST_F(UploadServiceTest, ExtractChannelFromString) {
       metrics::SystemProfileProto::CHANNEL_UNKNOWN);
 
   EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_DEV,
-            SystemProfileCache::ProtoChannelFromString("dev-channel"));
+            SystemProfileCache::ProtoChannelFromString("dev"));
+
+  EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_STABLE,
+            SystemProfileCache::ProtoChannelFromString("stable"));
 
   EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_UNKNOWN,
-            SystemProfileCache::ProtoChannelFromString("dev-channel test"));
+            SystemProfileCache::ProtoChannelFromString("this is a test"));
 }
 
 TEST_F(UploadServiceTest, ValuesInConfigFileAreSent) {
-  std::string name("os name");
-  std::string content(
-      "CHROMEOS_RELEASE_NAME=" + name +
-      "\nCHROMEOS_RELEASE_VERSION=version\n"
-      "CHROMEOS_RELEASE_DESCRIPTION=description beta-channel test\n"
-      "CHROMEOS_RELEASE_TRACK=beta-channel\n"
-      "CHROMEOS_RELEASE_BUILD_TYPE=developer build\n"
-      "CHROMEOS_RELEASE_BOARD=myboard");
-  base::SysInfo::SetChromeOSVersionInfoForTest(content, base::Time());
+  SenderMock* sender = new SenderMock();
+  upload_service_->sender_.reset(sender);
+
+  SetTestingProperty(metrics::kChannelProperty, "beta");
+  SetTestingProperty(metrics::kBuildTargetIdProperty, "hello");
+  SetTestingProperty(metrics::kProductVersionProperty, "1.2.3.4");
 
   scoped_ptr<metrics::MetricSample> histogram =
      metrics::MetricSample::SparseHistogramSample("myhistogram", 1);
-  SystemProfileCache* local_cache_ = new SystemProfileCache(true, "/");
-  local_cache_->session_id_.reset(new chromeos_metrics::PersistentInteger(
-      dir_.path().Append("session_id").value()));
-
-  upload_service_.system_profile_setter_.reset(local_cache_);
   // Reset to create the new log with the profile setter.
-  upload_service_.Reset();
-  upload_service_.AddSample(*histogram.get());
-  upload_service_.UploadEvent();
+  upload_service_->system_profile_setter_.reset(
+      new SystemProfileCache(true, dir_.path()));
+  upload_service_->Reset();
+  upload_service_->AddSample(*histogram.get());
+  upload_service_->UploadEvent();
 
-  EXPECT_EQ(1, sender_->send_call_count());
-  EXPECT_TRUE(sender_->is_good_proto());
-  EXPECT_EQ(1, sender_->last_message_proto().histogram_event().size());
-  EXPECT_EQ(name, sender_->last_message_proto().system_profile().os().name());
+  EXPECT_EQ(1, sender->send_call_count());
+  EXPECT_TRUE(sender->is_good_proto());
+  EXPECT_EQ(1, sender->last_message_proto().histogram_event().size());
 
   EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_BETA,
-            sender_->last_message_proto().system_profile().channel());
-  EXPECT_NE(0, sender_->last_message_proto().client_id());
-  EXPECT_NE(0,
-            sender_->last_message_proto().system_profile().build_timestamp());
-  EXPECT_NE(0, sender_->last_message_proto().session_id());
+            sender->last_message_proto().system_profile().channel());
+  EXPECT_NE(0, sender->last_message_proto().client_id());
+  EXPECT_NE(0, sender->last_message_proto().system_profile().build_timestamp());
+  EXPECT_NE(0, sender->last_message_proto().session_id());
 }
 
 TEST_F(UploadServiceTest, PersistentGUID) {
@@ -252,15 +261,11 @@ TEST_F(UploadServiceTest, PersistentGUID) {
 }
 
 TEST_F(UploadServiceTest, SessionIdIncrementedAtInitialization) {
-  cache_.Initialize();
-  int session_id = cache_.profile_.session_id;
-  cache_.initialized_ = false;
-  cache_.Initialize();
-  EXPECT_EQ(cache_.profile_.session_id, session_id + 1);
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  SetTestingProperty(metrics::kBuildTargetIdProperty, "hello");
+  SystemProfileCache cache(true, dir_.path());
+  cache.Initialize();
+  int session_id = cache.profile_.session_id;
+  cache.initialized_ = false;
+  cache.Initialize();
+  EXPECT_EQ(cache.profile_.session_id, session_id + 1);
 }