forked from mirrors/gecko-dev
Bug 1866629 - Remove the sample_groups object in serialized counters, r=canaltinova,profiler-reviewers.
Differential Revision: https://phabricator.services.mozilla.com/D194695
This commit is contained in:
parent 9503eb0aca
commit 03bd9de29c

7 changed files with 245 additions and 328 deletions
@@ -948,15 +948,13 @@ void ProfileBuffer::StreamProfilerOverheadToJSON(
   });
 }
 
-struct CounterKeyedSample {
+struct CounterSample {
   double mTime;
   uint64_t mNumber;
   int64_t mCount;
 };
 
-using CounterKeyedSamples = Vector<CounterKeyedSample>;
+using CounterSamples = Vector<CounterSample>;
 
-using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
-
 // HashMap lookup, if not found, a default value is inserted.
 // Returns reference to (existing or new) value inside the HashMap.
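Both profiler copies share the `LookupOrAdd` helper referenced in the comment above. A rough sketch of the same contract using standard containers (`std::unordered_map` and `std::vector` stand in for `mozilla::HashMap` and `mozilla::Vector`; the real helper presumably goes through the HashMap's add API and asserts on allocation failure):

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// Sketch only: operator[] default-constructs a missing value and returns a
// reference to it either way, which is the "lookup, insert default if not
// found, return reference" behavior described above.
template <typename Map, typename Key>
static typename Map::mapped_type& LookupOrAdd(Map& aMap, const Key& aKey) {
  return aMap[aKey];
}

// The shape after this change: one flat vector of samples per counter id;
// the per-CounterKey inner map is gone.
struct CounterSample { double mTime; uint64_t mNumber; int64_t mCount; };
std::unordered_map<const void*, std::vector<CounterSample>> counters;
```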
@@ -993,41 +991,38 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
   // Valid sequence in the buffer:
   // CounterID
   // Time
-  // ( CounterKey Count Number? )*
+  // ( Count Number? )*
   //
   // And the JSON (example):
   // "counters": {
   //  "name": "malloc",
   //  "category": "Memory",
   //  "description": "Amount of allocated memory",
-  //  "sample_groups": {
-  //   "id": 0,
-  //   "samples": {
-  //    "schema": {"time": 0, "number": 1, "count": 2},
-  //    "data": [
-  //     [
-  //      16117.033968000002,
-  //      2446216,
-  //      6801320
-  //     ],
-  //     [
-  //      16118.037638,
-  //      2446216,
-  //      6801320
-  //     ],
-  //    ],
-  //   }
-  //  }
-  // },
+  //  "samples": {
+  //   "schema": {"time": 0, "count": 1, "number": 2},
+  //   "data": [
+  //    [
+  //     16117.033968000002,
+  //     2446216,
+  //     6801320
+  //    ],
+  //    [
+  //     16118.037638,
+  //     2446216,
+  //     6801320
+  //    ],
+  //   ],
+  //  },
+  // }
 
   // Build the map of counters and populate it
-  HashMap<void*, CounterMap> counters;
+  HashMap<void*, CounterSamples> counters;
 
   while (e.Has()) {
     // skip all non-Counters, including if we start in the middle of a counter
     if (e.Get().IsCounterId()) {
       void* id = e.Get().GetPtr();
-      CounterMap& counter = LookupOrAdd(counters, id);
+      CounterSamples& data = LookupOrAdd(counters, id);
       e.Next();
       if (!e.Has() || !e.Get().IsTime()) {
         ERROR_AND_CONTINUE("expected a Time entry");
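For orientation, the buffer grammar in the updated comment — `CounterId Time ( Count Number? )*`, with `CounterKey` dropped — can be expressed as a small checker. Everything here (the `Kind` enum, `MatchesCounterRecord`) is illustrative scaffolding, not the real `ProfileBufferEntry` API:

```cpp
#include <cstddef>
#include <vector>

// Toy entry kinds mirroring the grammar in the comment above.
enum class Kind { CounterId, Time, Count, Number, Other };

// True if `entries` starting at `i` opens a well-formed counter record:
// CounterId Time ( Count Number? )*
bool MatchesCounterRecord(const std::vector<Kind>& entries, size_t i) {
  if (i >= entries.size() || entries[i] != Kind::CounterId) return false;
  ++i;
  if (i >= entries.size() || entries[i] != Kind::Time) return false;
  ++i;
  while (i < entries.size() && entries[i] == Kind::Count) {
    ++i;  // consume Count
    if (i < entries.size() && entries[i] == Kind::Number) {
      ++i;  // consume the optional Number
    }
  }
  return true;
}
```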
@@ -1035,25 +1030,20 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
       double time = e.Get().GetDouble();
       e.Next();
       if (time >= aSinceTime) {
-        while (e.Has() && e.Get().IsCounterKey()) {
-          uint64_t key = e.Get().GetUint64();
-          CounterKeyedSamples& data = LookupOrAdd(counter, key);
-          e.Next();
-          if (!e.Has() || !e.Get().IsCount()) {
-            ERROR_AND_CONTINUE("expected a Count entry");
-          }
-          int64_t count = e.Get().GetUint64();
-          e.Next();
-          uint64_t number;
-          if (!e.Has() || !e.Get().IsNumber()) {
-            number = 0;
-          } else {
-            number = e.Get().GetInt64();
-            e.Next();
-          }
-          CounterKeyedSample sample = {time, number, count};
-          MOZ_RELEASE_ASSERT(data.append(sample));
+        if (!e.Has() || !e.Get().IsCount()) {
+          ERROR_AND_CONTINUE("expected a Count entry");
         }
+        int64_t count = e.Get().GetUint64();
+        e.Next();
+        uint64_t number;
+        if (!e.Has() || !e.Get().IsNumber()) {
+          number = 0;
+        } else {
+          number = e.Get().GetInt64();
+          e.Next();
+        }
+        CounterSample sample = {time, number, count};
+        MOZ_RELEASE_ASSERT(data.append(sample));
       } else {
         // skip counter sample - only need to skip the initial counter
         // id, then let the loop at the top skip the rest
@@ -1062,14 +1052,18 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
       e.Next();
     }
   }
-  // we have a map of a map of counter entries; dump them to JSON
+  // we have a map of counter entries; dump them to JSON
   if (counters.count() == 0) {
     return;
  }
 
   aWriter.StartArrayProperty("counters");
   for (auto iter = counters.iter(); !iter.done(); iter.next()) {
-    CounterMap& counter = iter.get().value();
+    CounterSamples& samples = iter.get().value();
+    size_t size = samples.length();
+    if (size == 0) {
+      continue;
+    }
     const BaseProfilerCount* base_counter =
         static_cast<const BaseProfilerCount*>(iter.get().key());
 
@@ -1080,70 +1074,51 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
     aWriter.StringProperty("description",
                            MakeStringSpan(base_counter->mDescription));
 
-    aWriter.StartArrayProperty("sample_groups");
-    for (auto counter_iter = counter.iter(); !counter_iter.done();
-         counter_iter.next()) {
-      CounterKeyedSamples& samples = counter_iter.get().value();
-      uint64_t key = counter_iter.get().key();
-
-      size_t size = samples.length();
-      if (size == 0) {
-        continue;
-      }
-
-      bool hasNumber = false;
-      for (size_t i = 0; i < size; i++) {
-        if (samples[i].mNumber != 0) {
-          hasNumber = true;
-          break;
-        }
-      }
-
-      aWriter.StartObjectElement();
-      {
-        aWriter.IntProperty("id", static_cast<int64_t>(key));
-        aWriter.StartObjectProperty("samples");
-        {
-          JSONSchemaWriter schema(aWriter);
-          schema.WriteField("time");
-          schema.WriteField("count");
-          if (hasNumber) {
-            schema.WriteField("number");
-          }
-        }
-
-        aWriter.StartArrayProperty("data");
-        uint64_t previousNumber = 0;
-        int64_t previousCount = 0;
-        for (size_t i = 0; i < size; i++) {
-          // Encode as deltas, and only encode if different than the last
-          // sample
-          if (i == 0 || samples[i].mNumber != previousNumber ||
-              samples[i].mCount != previousCount) {
-            MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
-            MOZ_ASSERT(samples[i].mNumber >= previousNumber);
-            MOZ_ASSERT(samples[i].mNumber - previousNumber <=
-                       uint64_t(std::numeric_limits<int64_t>::max()));
-
-            AutoArraySchemaWriter writer(aWriter);
-            writer.TimeMsElement(TIME, samples[i].mTime);
-            writer.IntElement(COUNT, samples[i].mCount - previousCount);
-            if (hasNumber) {
-              writer.IntElement(
-                  NUMBER,
-                  static_cast<int64_t>(samples[i].mNumber - previousNumber));
-            }
-            previousNumber = samples[i].mNumber;
-            previousCount = samples[i].mCount;
-          }
-        }
-        aWriter.EndArray();   // data
-        aWriter.EndObject();  // samples
-      }
-      aWriter.EndObject();  // sample_groups item
-    }
-    aWriter.EndArray();  // sample groups
-    aWriter.End();  // for each counter
+    bool hasNumber = false;
+    for (size_t i = 0; i < size; i++) {
+      if (samples[i].mNumber != 0) {
+        hasNumber = true;
+        break;
+      }
+    }
+
+    aWriter.StartObjectProperty("samples");
+    {
+      JSONSchemaWriter schema(aWriter);
+      schema.WriteField("time");
+      schema.WriteField("count");
+      if (hasNumber) {
+        schema.WriteField("number");
+      }
+    }
+
+    aWriter.StartArrayProperty("data");
+    uint64_t previousNumber = 0;
+    int64_t previousCount = 0;
+    for (size_t i = 0; i < size; i++) {
+      // Encode as deltas, and only encode if different than the last
+      // sample
+      if (i == 0 || samples[i].mNumber != previousNumber ||
+          samples[i].mCount != previousCount) {
+        MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
+        MOZ_ASSERT(samples[i].mNumber >= previousNumber);
+        MOZ_ASSERT(samples[i].mNumber - previousNumber <=
+                   uint64_t(std::numeric_limits<int64_t>::max()));
+
+        AutoArraySchemaWriter writer(aWriter);
+        writer.TimeMsElement(TIME, samples[i].mTime);
+        writer.IntElement(COUNT, samples[i].mCount - previousCount);
+        if (hasNumber) {
+          writer.IntElement(NUMBER, static_cast<int64_t>(samples[i].mNumber -
+                                                         previousNumber));
+        }
+        previousNumber = samples[i].mNumber;
+        previousCount = samples[i].mCount;
+      }
+    }
+    aWriter.EndArray();   // data
+    aWriter.EndObject();  // samples
+    aWriter.End();  // for each counter
   }
   aWriter.EndArray();  // counters
 });
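The "data" rows this writer produces are delta-encoded: each row stores the change in count (and number) since the previously written row, and rows identical to their predecessor are dropped. A sketch of what a consumer has to do to get absolute values back (`CounterRow` and `CounterPoint` are invented names; the real front end works on the parsed JSON arrays):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical decoded form of one ["time", "count", "number"] data row;
// count/number hold deltas against the previous emitted row.
struct CounterRow {
  double time;
  int64_t countDelta;
  int64_t numberDelta;  // 0 when the optional "number" column is absent
};

struct CounterPoint {
  double time;
  int64_t count;
  uint64_t number;
};

// Accumulate deltas into absolute values. The first row is effectively
// absolute, because the writer also starts its previous* state at zero.
std::vector<CounterPoint> DecodeCounterRows(
    const std::vector<CounterRow>& rows) {
  std::vector<CounterPoint> points;
  int64_t count = 0;
  uint64_t number = 0;
  for (const CounterRow& row : rows) {
    count += row.countDelta;
    number += static_cast<uint64_t>(row.numberDelta);
    points.push_back({row.time, count, number});
  }
  return points;
}
```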
@@ -1261,7 +1236,6 @@ bool ProfileBuffer::DuplicateLastSample(BaseProfilerThreadId aThreadId,
         ProfileBufferEntry::Time(
             (TimeStamp::Now() - aProcessStartTime).ToMilliseconds()));
         break;
-      case ProfileBufferEntry::Kind::CounterKey:
       case ProfileBufferEntry::Kind::Number:
       case ProfileBufferEntry::Kind::Count:
       case ProfileBufferEntry::Kind::Responsiveness:
@@ -2348,14 +2348,9 @@ void SamplerThread::Run() {
         // create Buffer entries for each counter
         buffer.AddEntry(ProfileBufferEntry::CounterId(counter));
         buffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
-        // XXX support keyed maps of counts
-        // In the future, we'll support keyed counters - for example, counters
-        // with a key which is a thread ID. For "simple" counters we'll just
-        // use a key of 0.
         int64_t count;
         uint64_t number;
         counter->Sample(count, number);
-        buffer.AddEntry(ProfileBufferEntry::CounterKey(0));
         buffer.AddEntry(ProfileBufferEntry::Count(count));
         if (number) {
           buffer.AddEntry(ProfileBufferEntry::Number(number));
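With the `CounterKey(0)` entry gone, each sampling tick emits a fixed record per counter. A toy model of the resulting layout (`Entry` and `AppendCounterSample` are stand-ins, not the real `ProfileBufferEntry` interface):

```cpp
#include <cstdint>
#include <vector>

// Toy tagged entry; the real ProfileBufferEntry carries a typed payload.
struct Entry {
  enum class Kind { CounterId, Time, Count, Number } kind;
  double time = 0.0;   // used when kind == Time
  uint64_t value = 0;  // used for CounterId/Count/Number
};

// One tick now appends CounterId, Time, Count, and Number only when it is
// non-zero; previously a CounterKey(0) sat between Time and Count.
void AppendCounterSample(std::vector<Entry>& buffer, uint64_t counterId,
                         double timeMs, int64_t count, uint64_t number) {
  buffer.push_back({Entry::Kind::CounterId, 0.0, counterId});
  buffer.push_back({Entry::Kind::Time, timeMs, 0});
  buffer.push_back({Entry::Kind::Count, 0.0, static_cast<uint64_t>(count)});
  if (number) {
    buffer.push_back({Entry::Kind::Number, 0.0, number});
  }
}
```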
@@ -24,7 +24,7 @@ namespace mozilla {
 class ProfileBufferChunkManagerWithLocalLimit;
 
 // Centrally defines the version of the gecko profiler JSON format.
-const int GECKO_PROFILER_FORMAT_VERSION = 28;
+const int GECKO_PROFILER_FORMAT_VERSION = 29;
 
 namespace baseprofiler::detail {
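Since the counter JSON shape changes incompatibly, the format version is bumped from 28 to 29. A consumer that wants to read both shapes would have to branch on the profile's version field (which, to my understanding, lives under "meta" in Gecko profiles; the helper below is hypothetical):

```cpp
// Profiles at or above this version store counter samples directly under
// "samples"; older ones nest them inside "sample_groups".
constexpr int kFlatCounterSamplesVersion = 29;

bool HasFlatCounterSamples(int profileVersion) {
  return profileVersion >= kFlatCounterSamplesVersion;
}
```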
@@ -43,7 +43,6 @@ static constexpr size_t ProfileBufferEntryNumChars = 8;
   MACRO(TimeBeforeCompactStack, double, sizeof(double)) \
   MACRO(TimeBeforeSameSample, double, sizeof(double))   \
   MACRO(CounterId, void*, sizeof(void*))                \
-  MACRO(CounterKey, uint64_t, sizeof(uint64_t))         \
   MACRO(Number, uint64_t, sizeof(uint64_t))             \
   MACRO(Count, int64_t, sizeof(int64_t))                \
   MACRO(ProfilerOverheadTime, double, sizeof(double))   \
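`CounterKey` disappears from the entry-kind X-macro list above, which removes the kind everywhere the list is expanded (the enum value, the size table, the typed constructors and accessors). As a generic illustration of that pattern — the expansion macros here are invented, only the kind names come from the diff:

```cpp
// One list, many expansions: applying a different MACRO generates a
// different artifact from the same set of names.
#define FOR_EACH_ENTRY_KIND(MACRO) \
  MACRO(CounterId)                 \
  MACRO(Number)                    \
  MACRO(Count)

enum class EntryKind {
#define KIND_ENUM(name) name,
  FOR_EACH_ENTRY_KIND(KIND_ENUM)
#undef KIND_ENUM
};
```

Deleting a name from the list is therefore the whole removal; no separate enum or accessor edits are needed.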
@@ -1774,18 +1774,16 @@ void ProfileBuffer::StreamProfilerOverheadToJSON(
   });
 }
 
-struct CounterKeyedSample {
+struct CounterSample {
   double mTime;
   uint64_t mNumber;
   int64_t mCount;
 };
 
-using CounterKeyedSamples = Vector<CounterKeyedSample>;
+using CounterSamples = Vector<CounterSample>;
 
 static LazyLogModule sFuzzyfoxLog("Fuzzyfox");
 
-using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
-
 // HashMap lookup, if not found, a default value is inserted.
 // Returns reference to (existing or new) value inside the HashMap.
 template <typename HashM, typename Key>
@@ -1822,41 +1820,38 @@ void ProfileBuffer::StreamCountersToJSON(
   // Valid sequence in the buffer:
   // CounterID
   // Time
-  // ( CounterKey Count Number? )*
+  // ( Count Number? )*
   //
   // And the JSON (example):
   // "counters": {
   //  "name": "malloc",
   //  "category": "Memory",
   //  "description": "Amount of allocated memory",
-  //  "sample_groups": {
-  //   "id": 0,
-  //   "samples": {
-  //    "schema": {"time": 0, "number": 1, "count": 2},
-  //    "data": [
-  //     [
-  //      16117.033968000002,
-  //      2446216,
-  //      6801320
-  //     ],
-  //     [
-  //      16118.037638,
-  //      2446216,
-  //      6801320
-  //     ],
-  //    ],
-  //   }
-  //  }
-  // },
+  //  "samples": {
+  //   "schema": {"time": 0, "count": 1, "number": 2},
+  //   "data": [
+  //    [
+  //     16117.033968000002,
+  //     2446216,
+  //     6801320
+  //    ],
+  //    [
+  //     16118.037638,
+  //     2446216,
+  //     6801320
+  //    ],
+  //   ],
+  //  },
+  // }
 
   // Build the map of counters and populate it
-  HashMap<void*, CounterMap> counters;
+  HashMap<void*, CounterSamples> counters;
 
   while (e.Has()) {
     // skip all non-Counters, including if we start in the middle of a counter
     if (e.Get().IsCounterId()) {
       void* id = e.Get().GetPtr();
-      CounterMap& counter = LookupOrAdd(counters, id);
+      CounterSamples& data = LookupOrAdd(counters, id);
       e.Next();
       if (!e.Has() || !e.Get().IsTime()) {
         ERROR_AND_CONTINUE("expected a Time entry");
@@ -1864,25 +1859,20 @@ void ProfileBuffer::StreamCountersToJSON(
       double time = e.Get().GetDouble();
       e.Next();
       if (time >= aSinceTime) {
-        while (e.Has() && e.Get().IsCounterKey()) {
-          uint64_t key = e.Get().GetUint64();
-          CounterKeyedSamples& data = LookupOrAdd(counter, key);
-          e.Next();
-          if (!e.Has() || !e.Get().IsCount()) {
-            ERROR_AND_CONTINUE("expected a Count entry");
-          }
-          int64_t count = e.Get().GetUint64();
-          e.Next();
-          uint64_t number;
-          if (!e.Has() || !e.Get().IsNumber()) {
-            number = 0;
-          } else {
-            number = e.Get().GetInt64();
-            e.Next();
-          }
-          CounterKeyedSample sample = {time, number, count};
-          MOZ_RELEASE_ASSERT(data.append(sample));
+        if (!e.Has() || !e.Get().IsCount()) {
+          ERROR_AND_CONTINUE("expected a Count entry");
         }
+        int64_t count = e.Get().GetUint64();
+        e.Next();
+        uint64_t number;
+        if (!e.Has() || !e.Get().IsNumber()) {
+          number = 0;
+        } else {
+          number = e.Get().GetInt64();
+          e.Next();
+        }
+        CounterSample sample = {time, number, count};
+        MOZ_RELEASE_ASSERT(data.append(sample));
       } else {
         // skip counter sample - only need to skip the initial counter
         // id, then let the loop at the top skip the rest
@@ -1891,14 +1881,18 @@ void ProfileBuffer::StreamCountersToJSON(
       e.Next();
     }
   }
-  // we have a map of a map of counter entries; dump them to JSON
+  // we have a map of counter entries; dump them to JSON
   if (counters.count() == 0) {
     return;
   }
 
   aWriter.StartArrayProperty("counters");
   for (auto iter = counters.iter(); !iter.done(); iter.next()) {
-    CounterMap& counter = iter.get().value();
+    CounterSamples& samples = iter.get().value();
+    size_t size = samples.length();
+    if (size == 0) {
+      continue;
+    }
     const BaseProfilerCount* base_counter =
         static_cast<const BaseProfilerCount*>(iter.get().key());
 
@@ -1909,103 +1903,83 @@ void ProfileBuffer::StreamCountersToJSON(
     aWriter.StringProperty("description",
                            MakeStringSpan(base_counter->mDescription));
 
-    aWriter.StartArrayProperty("sample_groups");
-    for (auto counter_iter = counter.iter(); !counter_iter.done();
-         counter_iter.next()) {
-      CounterKeyedSamples& samples = counter_iter.get().value();
-      uint64_t key = counter_iter.get().key();
-
-      size_t size = samples.length();
-      if (size == 0) {
-        continue;
-      }
-
-      bool hasNumber = false;
-      for (size_t i = 0; i < size; i++) {
-        if (samples[i].mNumber != 0) {
-          hasNumber = true;
-          break;
-        }
-      }
-
-      aWriter.StartObjectElement();
-      {
-        aWriter.IntProperty("id", static_cast<int64_t>(key));
-        aWriter.StartObjectProperty("samples");
-        {
-          JSONSchemaWriter schema(aWriter);
-          schema.WriteField("time");
-          schema.WriteField("count");
-          if (hasNumber) {
-            schema.WriteField("number");
-          }
-        }
-
-        aWriter.StartArrayProperty("data");
-        double previousSkippedTime = 0.0;
-        uint64_t previousNumber = 0;
-        int64_t previousCount = 0;
-        for (size_t i = 0; i < size; i++) {
-          // Encode as deltas, and only encode if different than the previous
-          // or next sample; Always write the first and last samples.
-          if (i == 0 || i == size - 1 ||
-              samples[i].mNumber != previousNumber ||
-              samples[i].mCount != previousCount ||
-              // Ensure we ouput the first 0 before skipping samples.
-              (i >= 2 && (samples[i - 2].mNumber != previousNumber ||
-                          samples[i - 2].mCount != previousCount))) {
-            if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
-              MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
-                      ("Fuzzyfox Profiler Assertion: %f >= %f",
-                       samples[i].mTime, samples[i - 1].mTime));
-            }
-            MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
-            MOZ_ASSERT(samples[i].mNumber >= previousNumber);
-            MOZ_ASSERT(samples[i].mNumber - previousNumber <=
-                       uint64_t(std::numeric_limits<int64_t>::max()));
-
-            int64_t numberDelta =
-                static_cast<int64_t>(samples[i].mNumber - previousNumber);
-            int64_t countDelta = samples[i].mCount - previousCount;
-
-            if (previousSkippedTime != 0.0 &&
-                (numberDelta != 0 || countDelta != 0)) {
-              // Write the last skipped sample, unless the new one is all
-              // zeroes (that'd be redundant) This is useful to know when a
-              // certain value was last sampled, so that the front-end graph
-              // will be more correct.
-              AutoArraySchemaWriter writer(aWriter);
-              writer.TimeMsElement(TIME, previousSkippedTime);
-              // The deltas are effectively zeroes, since no change happened
-              // between the last actually-written sample and the last skipped
-              // one.
-              writer.IntElement(COUNT, 0);
-              if (hasNumber) {
-                writer.IntElement(NUMBER, 0);
-              }
-            }
-
-            AutoArraySchemaWriter writer(aWriter);
-            writer.TimeMsElement(TIME, samples[i].mTime);
-            writer.IntElement(COUNT, countDelta);
-            if (hasNumber) {
-              writer.IntElement(NUMBER, numberDelta);
-            }
-
-            previousSkippedTime = 0.0;
-            previousNumber = samples[i].mNumber;
-            previousCount = samples[i].mCount;
-          } else {
-            previousSkippedTime = samples[i].mTime;
-          }
-        }
-        aWriter.EndArray();   // data
-        aWriter.EndObject();  // samples
-      }
-      aWriter.EndObject();  // sample_groups item
-    }
-    aWriter.EndArray();  // sample groups
-    aWriter.End();  // for each counter
+    bool hasNumber = false;
+    for (size_t i = 0; i < size; i++) {
+      if (samples[i].mNumber != 0) {
+        hasNumber = true;
+        break;
+      }
+    }
+
+    aWriter.StartObjectProperty("samples");
+    {
+      JSONSchemaWriter schema(aWriter);
+      schema.WriteField("time");
+      schema.WriteField("count");
+      if (hasNumber) {
+        schema.WriteField("number");
+      }
+    }
+
+    aWriter.StartArrayProperty("data");
+    double previousSkippedTime = 0.0;
+    uint64_t previousNumber = 0;
+    int64_t previousCount = 0;
+    for (size_t i = 0; i < size; i++) {
+      // Encode as deltas, and only encode if different than the previous
+      // or next sample; Always write the first and last samples.
+      if (i == 0 || i == size - 1 || samples[i].mNumber != previousNumber ||
+          samples[i].mCount != previousCount ||
+          // Ensure we ouput the first 0 before skipping samples.
+          (i >= 2 && (samples[i - 2].mNumber != previousNumber ||
+                      samples[i - 2].mCount != previousCount))) {
+        if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
+          MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
+                  ("Fuzzyfox Profiler Assertion: %f >= %f", samples[i].mTime,
+                   samples[i - 1].mTime));
+        }
+        MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
+        MOZ_ASSERT(samples[i].mNumber >= previousNumber);
+        MOZ_ASSERT(samples[i].mNumber - previousNumber <=
+                   uint64_t(std::numeric_limits<int64_t>::max()));
+
+        int64_t numberDelta =
+            static_cast<int64_t>(samples[i].mNumber - previousNumber);
+        int64_t countDelta = samples[i].mCount - previousCount;
+
+        if (previousSkippedTime != 0.0 &&
+            (numberDelta != 0 || countDelta != 0)) {
+          // Write the last skipped sample, unless the new one is all
+          // zeroes (that'd be redundant) This is useful to know when a
+          // certain value was last sampled, so that the front-end graph
+          // will be more correct.
+          AutoArraySchemaWriter writer(aWriter);
+          writer.TimeMsElement(TIME, previousSkippedTime);
+          // The deltas are effectively zeroes, since no change happened
+          // between the last actually-written sample and the last skipped
+          // one.
+          writer.IntElement(COUNT, 0);
+          if (hasNumber) {
+            writer.IntElement(NUMBER, 0);
+          }
+        }
+
+        AutoArraySchemaWriter writer(aWriter);
+        writer.TimeMsElement(TIME, samples[i].mTime);
+        writer.IntElement(COUNT, countDelta);
+        if (hasNumber) {
+          writer.IntElement(NUMBER, numberDelta);
+        }
+
+        previousSkippedTime = 0.0;
+        previousNumber = samples[i].mNumber;
+        previousCount = samples[i].mCount;
+      } else {
+        previousSkippedTime = samples[i].mTime;
+      }
+    }
+    aWriter.EndArray();   // data
+    aWriter.EndObject();  // samples
+    aWriter.End();  // for each counter
   }
   aWriter.EndArray();  // counters
 });
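The Gecko-side writer keeps an extra wrinkle on top of the delta encoding: runs of identical samples are skipped, but when a value finally changes, the most recently skipped sample is written first, so a step graph shows how long the old value held. A simplified model of that policy on absolute values (the real code works on deltas and also force-writes around run boundaries):

```cpp
#include <cstdint>
#include <vector>

struct Sample {
  double time;
  int64_t count;
};

// Drop samples inside a flat run, but close each run with one explicit
// sample before the change, and always keep the first and last samples.
std::vector<Sample> CompressRuns(const std::vector<Sample>& in) {
  std::vector<Sample> out;
  bool haveSkipped = false;
  double skippedTime = 0.0;
  int64_t previous = 0;
  for (size_t i = 0; i < in.size(); i++) {
    bool changed = (i == 0) || in[i].count != previous;
    if (changed || i == in.size() - 1) {
      if (haveSkipped && changed) {
        out.push_back({skippedTime, previous});  // end of the flat run
      }
      out.push_back(in[i]);
      previous = in[i].count;
      haveSkipped = false;
    } else {
      haveSkipped = true;  // remember the newest unchanged sample
      skippedTime = in[i].time;
    }
  }
  return out;
}
```

For example, samples (1,5) (2,5) (3,5) (4,7) compress to (1,5) (3,5) (4,7): the value 5 is shown holding until time 3 before stepping to 7.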
@@ -2228,7 +2202,6 @@ bool ProfileBuffer::DuplicateLastSample(ProfilerThreadId aThreadId,
         // We're done.
         return true;
       }
-      case ProfileBufferEntry::Kind::CounterKey:
       case ProfileBufferEntry::Kind::Number:
       case ProfileBufferEntry::Kind::Count:
         // Don't copy anything not part of a thread's stack sample
@@ -4236,10 +4236,6 @@ void SamplerThread::Run() {
                 ActivePS::ControlledChunkManager(lock).TotalSize());
           }
 #endif
-          // In the future, we may support keyed counters - for example,
-          // counters with a key which is a thread ID. For "simple" counters
-          // we'll just use a key of 0.
-          buffer.AddEntry(ProfileBufferEntry::CounterKey(0));
           buffer.AddEntry(ProfileBufferEntry::Count(sample.count));
           if (sample.number) {
             buffer.AddEntry(ProfileBufferEntry::Number(sample.number));
@@ -1393,30 +1393,24 @@ static void JSONRootCheck(const Json::Value& aRoot,
       EXPECT_HAS_JSON(counter["name"], String);
       EXPECT_HAS_JSON(counter["category"], String);
       EXPECT_HAS_JSON(counter["description"], String);
-      GET_JSON(sampleGroups, counter["sample_groups"], Array);
-      for (const Json::Value& sampleGroup : sampleGroups) {
-        ASSERT_TRUE(sampleGroup.isObject());
-        EXPECT_HAS_JSON(sampleGroup["id"], UInt);
-
-        GET_JSON(samples, sampleGroup["samples"], Object);
-        GET_JSON(samplesSchema, samples["schema"], Object);
-        EXPECT_GE(samplesSchema.size(), 3u);
-        GET_JSON_VALUE(samplesTime, samplesSchema["time"], UInt);
-        GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
-        GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
-        GET_JSON(samplesData, samples["data"], Array);
-        double previousTime = 0.0;
-        for (const Json::Value& sample : samplesData) {
-          ASSERT_TRUE(sample.isArray());
-          GET_JSON_VALUE(time, sample[samplesTime], Double);
-          EXPECT_GE(time, previousTime);
-          previousTime = time;
-          if (sample.isValidIndex(samplesNumber)) {
-            EXPECT_HAS_JSON(sample[samplesNumber], UInt64);
-          }
-          if (sample.isValidIndex(samplesCount)) {
-            EXPECT_HAS_JSON(sample[samplesCount], Int64);
-          }
+      GET_JSON(samples, counter["samples"], Object);
+      GET_JSON(samplesSchema, samples["schema"], Object);
+      EXPECT_GE(samplesSchema.size(), 3u);
+      GET_JSON_VALUE(samplesTime, samplesSchema["time"], UInt);
+      GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
+      GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
+      GET_JSON(samplesData, samples["data"], Array);
+      double previousTime = 0.0;
+      for (const Json::Value& sample : samplesData) {
+        ASSERT_TRUE(sample.isArray());
+        GET_JSON_VALUE(time, sample[samplesTime], Double);
+        EXPECT_GE(time, previousTime);
+        previousTime = time;
+        if (sample.isValidIndex(samplesNumber)) {
+          EXPECT_HAS_JSON(sample[samplesNumber], UInt64);
+        }
+        if (sample.isValidIndex(samplesCount)) {
+          EXPECT_HAS_JSON(sample[samplesCount], Int64);
         }
       }
     }
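The test reads each row through the "schema" object, which maps a column name to its index within every "data" row. A minimal sketch of that indirection with std types instead of `Json::Value` (names are illustrative):

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

// schema: column name -> index into each data row, as in
// {"time": 0, "count": 1, "number": 2} above.
double ColumnValue(const std::unordered_map<std::string, size_t>& schema,
                   const std::vector<double>& row, const std::string& name) {
  return row.at(schema.at(name));
}
```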
@@ -3572,56 +3566,42 @@ TEST(GeckoProfiler, Counters)
       if (name == "TestCounter") {
         EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME);
         EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION);
-        GET_JSON(sampleGroups, counter["sample_groups"], Array);
-        for (const Json::Value& sampleGroup : sampleGroups) {
-          ASSERT_TRUE(sampleGroup.isObject());
-          EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u);
-
-          GET_JSON(samples, sampleGroup["samples"], Object);
-          GET_JSON(samplesSchema, samples["schema"], Object);
-          EXPECT_GE(samplesSchema.size(), 3u);
-          GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
-          GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
-          GET_JSON(samplesData, samples["data"], Array);
-          for (const Json::Value& sample : samplesData) {
-            ASSERT_TRUE(sample.isArray());
-            ASSERT_LT(nextExpectedTestCounter, expectedTestCountersCount);
-            EXPECT_EQ_JSON(
-                sample[samplesNumber], UInt64,
-                expectedTestCounters[nextExpectedTestCounter].mNumber);
-            EXPECT_EQ_JSON(
-                sample[samplesCount], Int64,
-                expectedTestCounters[nextExpectedTestCounter].mCount);
-            ++nextExpectedTestCounter;
-          }
+        GET_JSON(samples, counter["samples"], Object);
+        GET_JSON(samplesSchema, samples["schema"], Object);
+        EXPECT_GE(samplesSchema.size(), 3u);
+        GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
+        GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
+        GET_JSON(samplesData, samples["data"], Array);
+        for (const Json::Value& sample : samplesData) {
+          ASSERT_TRUE(sample.isArray());
+          ASSERT_LT(nextExpectedTestCounter, expectedTestCountersCount);
+          EXPECT_EQ_JSON(sample[samplesNumber], UInt64,
+                         expectedTestCounters[nextExpectedTestCounter].mNumber);
+          EXPECT_EQ_JSON(sample[samplesCount], Int64,
+                         expectedTestCounters[nextExpectedTestCounter].mCount);
+          ++nextExpectedTestCounter;
         }
       } else if (name == "TestCounter2") {
         EXPECT_TRUE(expectCounter2);
 
         EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME2);
         EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION2);
-        GET_JSON(sampleGroups, counter["sample_groups"], Array);
-        for (const Json::Value& sampleGroup : sampleGroups) {
-          ASSERT_TRUE(sampleGroup.isObject());
-          EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u);
-
-          GET_JSON(samples, sampleGroup["samples"], Object);
-          GET_JSON(samplesSchema, samples["schema"], Object);
-          EXPECT_GE(samplesSchema.size(), 3u);
-          GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
-          GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
-          GET_JSON(samplesData, samples["data"], Array);
-          for (const Json::Value& sample : samplesData) {
-            ASSERT_TRUE(sample.isArray());
-            ASSERT_LT(nextExpectedTestCounter2, expectedTestCounters2Count);
-            EXPECT_EQ_JSON(
-                sample[samplesNumber], UInt64,
-                expectedTestCounters2[nextExpectedTestCounter2].mNumber);
-            EXPECT_EQ_JSON(
-                sample[samplesCount], Int64,
-                expectedTestCounters2[nextExpectedTestCounter2].mCount);
-            ++nextExpectedTestCounter2;
-          }
+        GET_JSON(samples, counter["samples"], Object);
+        GET_JSON(samplesSchema, samples["schema"], Object);
+        EXPECT_GE(samplesSchema.size(), 3u);
+        GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
+        GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
+        GET_JSON(samplesData, samples["data"], Array);
+        for (const Json::Value& sample : samplesData) {
+          ASSERT_TRUE(sample.isArray());
+          ASSERT_LT(nextExpectedTestCounter2, expectedTestCounters2Count);
+          EXPECT_EQ_JSON(
+              sample[samplesNumber], UInt64,
+              expectedTestCounters2[nextExpectedTestCounter2].mNumber);
+          EXPECT_EQ_JSON(
+              sample[samplesCount], Int64,
+              expectedTestCounters2[nextExpectedTestCounter2].mCount);
+          ++nextExpectedTestCounter2;
+        }
       }
     }
   }