Compare commits
1 Commits
native-ima
...
jstack
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a20d771635 |
@@ -1,752 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang='en'>
|
||||
<head>
|
||||
<meta charset='utf-8'>
|
||||
<style>
|
||||
body {margin: 0; padding: 10px 10px 22px 10px; background-color: #ffffff}
|
||||
h1 {margin: 5px 0 0 0; font-size: 18px; font-weight: normal; text-align: center}
|
||||
header {margin: -24px 0 5px 0; line-height: 24px}
|
||||
button {font: 12px sans-serif; cursor: pointer}
|
||||
p {position: fixed; bottom: 0; margin: 0; padding: 2px 3px 2px 3px; outline: 1px solid #ffc000; display: none; overflow: hidden; white-space: nowrap; background-color: #ffffe0}
|
||||
a {color: #0366d6}
|
||||
#hl {position: absolute; display: none; overflow: hidden; white-space: nowrap; pointer-events: none; background-color: #ffffe0; outline: 1px solid #ffc000; height: 15px}
|
||||
#hl span {padding: 0 3px 0 3px}
|
||||
#status {left: 0}
|
||||
#match {right: 0}
|
||||
#reset {cursor: pointer}
|
||||
#canvas {width: 100%; height: 576px}
|
||||
</style>
|
||||
</head>
|
||||
<body style='font: 12px Verdana, sans-serif'>
|
||||
<h1>CPU profile</h1>
|
||||
<header style='text-align: left'><button id='reverse' title='Reverse'>🔻</button> <button id='search' title='Search'>🔍</button></header>
|
||||
<header style='text-align: right'>Produced by <a href='https://github.com/async-profiler/async-profiler'>async-profiler</a></header>
|
||||
<canvas id='canvas'></canvas>
|
||||
<div id='hl'><span></span></div>
|
||||
<p id='status'></p>
|
||||
<p id='match'>Matched: <span id='matchval'></span> <span id='reset' title='Clear'>❌</span></p>
|
||||
<script>
|
||||
// Copyright The async-profiler authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
'use strict';
|
||||
let root, px, pattern;
|
||||
let level0 = 0, left0 = 0, width0 = 0;
|
||||
let nav = [], navIndex, matchval;
|
||||
let reverse = false;
|
||||
const levels = Array(36);
|
||||
for (let h = 0; h < levels.length; h++) {
|
||||
levels[h] = [];
|
||||
}
|
||||
|
||||
const canvas = document.getElementById('canvas');
|
||||
const c = canvas.getContext('2d');
|
||||
const hl = document.getElementById('hl');
|
||||
const status = document.getElementById('status');
|
||||
|
||||
const canvasWidth = canvas.offsetWidth;
|
||||
const canvasHeight = canvas.offsetHeight;
|
||||
canvas.style.width = canvasWidth + 'px';
|
||||
canvas.width = canvasWidth * (devicePixelRatio || 1);
|
||||
canvas.height = canvasHeight * (devicePixelRatio || 1);
|
||||
if (devicePixelRatio) c.scale(devicePixelRatio, devicePixelRatio);
|
||||
c.font = document.body.style.font;
|
||||
|
||||
const palette = [
|
||||
[0xb2e1b2, 20, 20, 20],
|
||||
[0x50e150, 30, 30, 30],
|
||||
[0x50cccc, 30, 30, 30],
|
||||
[0xe15a5a, 30, 40, 40],
|
||||
[0xc8c83c, 30, 30, 10],
|
||||
[0xe17d00, 30, 30, 0],
|
||||
[0xcce880, 20, 20, 20],
|
||||
];
|
||||
|
||||
function getColor(p) {
|
||||
const v = Math.random();
|
||||
return '#' + (p[0] + ((p[1] * v) << 16 | (p[2] * v) << 8 | (p[3] * v))).toString(16);
|
||||
}
|
||||
|
||||
function f(key, level, left, width, inln, c1, int) {
|
||||
levels[level0 = level].push({level, left: left0 += left, width: width0 = width || width0,
|
||||
color: getColor(palette[key & 7]), title: cpool[key >>> 3],
|
||||
details: (int ? ', int=' + int : '') + (c1 ? ', c1=' + c1 : '') + (inln ? ', inln=' + inln : '')
|
||||
});
|
||||
}
|
||||
|
||||
function u(key, width, inln, c1, int) {
|
||||
f(key, level0 + 1, 0, width, inln, c1, int)
|
||||
}
|
||||
|
||||
function n(key, width, inln, c1, int) {
|
||||
f(key, level0, width0, width, inln, c1, int)
|
||||
}
|
||||
|
||||
function samples(n) {
|
||||
return n === 1 ? '1 sample' : n.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ',') + ' samples';
|
||||
}
|
||||
|
||||
function pct(a, b) {
|
||||
return a >= b ? '100' : (100 * a / b).toFixed(2);
|
||||
}
|
||||
|
||||
function findFrame(frames, x) {
|
||||
let left = 0;
|
||||
let right = frames.length - 1;
|
||||
|
||||
while (left <= right) {
|
||||
const mid = (left + right) >>> 1;
|
||||
const f = frames[mid];
|
||||
|
||||
if (f.left > x) {
|
||||
right = mid - 1;
|
||||
} else if (f.left + f.width <= x) {
|
||||
left = mid + 1;
|
||||
} else {
|
||||
return f;
|
||||
}
|
||||
}
|
||||
|
||||
if (frames[left] && (frames[left].left - x) * px < 0.5) return frames[left];
|
||||
if (frames[right] && (x - (frames[right].left + frames[right].width)) * px < 0.5) return frames[right];
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function removeStack(left, width) {
|
||||
for (let h = 0; h < levels.length; h++) {
|
||||
const frames = levels[h], newFrames = [];
|
||||
for (let i = 0; i < frames.length; i++) {
|
||||
const f = frames[i];
|
||||
if (f.left >= left + width) {
|
||||
f.left -= width;
|
||||
} else if (f.left + f.width > left) {
|
||||
if ((f.width -= width) <= 0 && h) continue;
|
||||
}
|
||||
newFrames.push(f);
|
||||
}
|
||||
levels[h] = newFrames;
|
||||
}
|
||||
}
|
||||
|
||||
function search(r) {
|
||||
if (r === true && (r = prompt('Enter regexp to search:', '')) === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
pattern = r ? RegExp(r) : undefined;
|
||||
const matched = render(root, nav = []);
|
||||
navIndex = -1;
|
||||
document.getElementById('matchval').textContent = matchval = pct(matched, root.width) + '%';
|
||||
document.getElementById('match').style.display = r ? 'inline-block' : 'none';
|
||||
}
|
||||
|
||||
function render(newRoot, nav) {
|
||||
if (root) {
|
||||
c.fillStyle = '#ffffff';
|
||||
c.fillRect(0, 0, canvasWidth, canvasHeight);
|
||||
}
|
||||
|
||||
root = newRoot || levels[0][0];
|
||||
px = canvasWidth / root.width;
|
||||
|
||||
const x0 = root.left;
|
||||
const x1 = x0 + root.width;
|
||||
const marked = [];
|
||||
|
||||
function mark(f) {
|
||||
return marked[f.left] || (marked[f.left] = f);
|
||||
}
|
||||
|
||||
function totalMarked() {
|
||||
let total = 0;
|
||||
let left = 0;
|
||||
Object.keys(marked).sort(function(a, b) { return a - b; }).forEach(function(x) {
|
||||
if (+x >= left) {
|
||||
const m = marked[x];
|
||||
if (nav) nav.push(m);
|
||||
total += m.width;
|
||||
left = +x + m.width;
|
||||
}
|
||||
});
|
||||
return total;
|
||||
}
|
||||
|
||||
function drawFrame(f, y) {
|
||||
if (f.left < x1 && f.left + f.width > x0) {
|
||||
c.fillStyle = pattern && f.title.match(pattern) && mark(f) ? '#ee00ee' : f.color;
|
||||
c.fillRect((f.left - x0) * px, y, f.width * px, 15);
|
||||
|
||||
if (f.width * px >= 21) {
|
||||
const chars = Math.floor(f.width * px / 7);
|
||||
const title = f.title.length <= chars ? f.title : f.title.substring(0, chars - 2) + '..';
|
||||
c.fillStyle = '#000000';
|
||||
c.fillText(title, Math.max(f.left - x0, 0) * px + 3, y + 12, f.width * px - 6);
|
||||
}
|
||||
|
||||
if (f.level < root.level) {
|
||||
c.fillStyle = 'rgba(255, 255, 255, 0.5)';
|
||||
c.fillRect((f.left - x0) * px, y, f.width * px, 15);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (let h = 0; h < levels.length; h++) {
|
||||
const y = reverse ? h * 16 : canvasHeight - (h + 1) * 16;
|
||||
const frames = levels[h];
|
||||
for (let i = 0; i < frames.length; i++) {
|
||||
drawFrame(frames[i], y);
|
||||
}
|
||||
}
|
||||
|
||||
return totalMarked();
|
||||
}
|
||||
|
||||
function unpack(cpool) {
|
||||
for (let i = 1; i < cpool.length; i++) {
|
||||
cpool[i] = cpool[i - 1].substring(0, cpool[i].charCodeAt(0) - 32) + cpool[i].substring(1);
|
||||
}
|
||||
}
|
||||
|
||||
canvas.onmousemove = function() {
|
||||
const h = Math.floor((reverse ? event.offsetY : (canvasHeight - event.offsetY)) / 16);
|
||||
if (h >= 0 && h < levels.length) {
|
||||
const f = findFrame(levels[h], event.offsetX / px + root.left);
|
||||
if (f) {
|
||||
if (f !== root) getSelection().removeAllRanges();
|
||||
hl.style.left = (Math.max(f.left - root.left, 0) * px + canvas.offsetLeft) + 'px';
|
||||
hl.style.width = (Math.min(f.width, root.width) * px) + 'px';
|
||||
hl.style.top = ((reverse ? h * 16 : canvasHeight - (h + 1) * 16) + canvas.offsetTop) + 'px';
|
||||
hl.firstChild.textContent = f.title;
|
||||
hl.style.display = 'block';
|
||||
canvas.title = f.title + '\n(' + samples(f.width) + f.details + ', ' + pct(f.width, levels[0][0].width) + '%)';
|
||||
canvas.style.cursor = 'pointer';
|
||||
canvas.onclick = function() {
|
||||
if (event.altKey && h >= root.level) {
|
||||
removeStack(f.left, f.width);
|
||||
root.width > f.width ? render(root) : render();
|
||||
} else if (f !== root) {
|
||||
render(f);
|
||||
}
|
||||
canvas.onmousemove();
|
||||
};
|
||||
status.textContent = 'Function: ' + canvas.title;
|
||||
status.style.display = 'inline-block';
|
||||
return;
|
||||
}
|
||||
}
|
||||
canvas.onmouseout();
|
||||
}
|
||||
|
||||
canvas.onmouseout = function() {
|
||||
hl.style.display = 'none';
|
||||
status.style.display = 'none';
|
||||
canvas.title = '';
|
||||
canvas.style.cursor = '';
|
||||
canvas.onclick = null;
|
||||
}
|
||||
|
||||
canvas.ondblclick = function() {
|
||||
getSelection().selectAllChildren(hl);
|
||||
}
|
||||
|
||||
document.getElementById('reverse').onclick = function() {
|
||||
reverse = !reverse;
|
||||
render();
|
||||
}
|
||||
|
||||
document.getElementById('search').onclick = function() {
|
||||
search(true);
|
||||
}
|
||||
|
||||
document.getElementById('reset').onclick = function() {
|
||||
search(false);
|
||||
}
|
||||
|
||||
window.onkeydown = function(event) {
|
||||
if ((event.ctrlKey || event.metaKey) && event.key === 'f') {
|
||||
event.preventDefault();
|
||||
search(true);
|
||||
} else if (event.key === 'Escape') {
|
||||
search(false);
|
||||
} else if ((event.key === 'n' || event.key === 'N') && nav.length > 0) {
|
||||
navIndex = (navIndex + (event.shiftKey ? nav.length - 1 : 1)) % nav.length;
|
||||
render(nav[navIndex]);
|
||||
document.getElementById('matchval').textContent = matchval + ' (' + (navIndex + 1) + ' of ' + nav.length + ')';
|
||||
window.scroll(0, reverse ? root.level * 16 : canvasHeight - (root.level + 1) * 16);
|
||||
canvas.onmousemove();
|
||||
}
|
||||
}
|
||||
|
||||
const cpool = [
|
||||
'all',
|
||||
' C2Compiler::compile_method',
|
||||
'!ompilation::Compilation',
|
||||
'-compile_java_method',
|
||||
'5method',
|
||||
'-emit_code_body',
|
||||
'&e::Code_Gen',
|
||||
'+mpile',
|
||||
')Optimize',
|
||||
'\'Broker::compiler_thread_loop',
|
||||
'/invoke_compiler_on_method',
|
||||
'\'r::compile_method',
|
||||
'"ntiguousSpace::allocate',
|
||||
' DefNewGeneration::FastEvacuateFollowersClosure::do_void',
|
||||
'2collect',
|
||||
'4py_to_survivor_space',
|
||||
' GenCollectedHeap::collect_generation',
|
||||
'2do_collection',
|
||||
'2satisfy_failed_allocation',
|
||||
'#eration::promote',
|
||||
' InstanceKlass::allocate_objArray',
|
||||
'"terpreterRuntime::anewarray',
|
||||
' JVM_ArrayCopy',
|
||||
'!avaThread::run',
|
||||
'$_sun_nio_ch_FileDispatcherImpl_read0',
|
||||
' Matcher::match',
|
||||
'!emAllocator::allocate',
|
||||
' ObjArrayAllocator::initialize',
|
||||
'!ffsetTableContigSpace::allocate',
|
||||
' Parse::Parse',
|
||||
'\'do_all_blocks',
|
||||
'*call',
|
||||
'*one_block',
|
||||
'/ytecode',
|
||||
'%Generator::generate',
|
||||
'!haseCFG::do_global_code_motion',
|
||||
'*global_code_motion',
|
||||
'*schedule_late',
|
||||
'4ocal',
|
||||
'&haitin::Register_Allocate',
|
||||
'.Split',
|
||||
'.build_ifg_physical',
|
||||
'.elide_copy',
|
||||
'.interfere_with_live',
|
||||
'.merge_multidefs',
|
||||
'.post_allocate_copy_removal',
|
||||
'%IdealLoop::Dominators',
|
||||
'0build_and_optimize',
|
||||
'6loop_early',
|
||||
';late',
|
||||
';tree',
|
||||
'0optimize',
|
||||
'0remix_address_expressions',
|
||||
'0split_if_with_blocks',
|
||||
'D_post',
|
||||
'Fre',
|
||||
'&terGVN::optimize',
|
||||
'.subsume_node',
|
||||
'.transform_old',
|
||||
'%Live::add_liveout',
|
||||
'+compute',
|
||||
'%MacroExpand::expand_macro_nodes',
|
||||
'!redictedCallGenerator::generate',
|
||||
' TenuredGeneration::allocate',
|
||||
'!hread::call_run',
|
||||
' VMThread::evaluate_operation',
|
||||
'*inner_execute',
|
||||
'*run',
|
||||
'"_GenCollectForAllocation::doit',
|
||||
'#Operation::evaluate',
|
||||
' __GI_read',
|
||||
'"handle_mm_fault',
|
||||
'"memcpy_sse2_unaligned_erms',
|
||||
'%set_avx2_unaligned_erms',
|
||||
' aci_CopyRight',
|
||||
'!sm_exc_page_fault',
|
||||
' clear_huge_page',
|
||||
'&page_erms',
|
||||
'&subpage',
|
||||
'"one3',
|
||||
'!opy_page_to_iter',
|
||||
'%user_enhanced_fast_string',
|
||||
' demo8/FileConverter$$Lambda$3.0x00007ffab9001000.apply',
|
||||
'<4.0x00007ffab9001240.applyAsInt',
|
||||
'4Entry.<init>',
|
||||
':equals',
|
||||
':hashCode',
|
||||
'3.convertFile',
|
||||
';List',
|
||||
'4main',
|
||||
'4readInput',
|
||||
'4saveResult',
|
||||
'!o_huge_pmd_anonymous_page',
|
||||
'#syscall_64',
|
||||
'#user_addr_fault',
|
||||
' entry_SYSCALL_64_after_hwframe',
|
||||
'!xc_page_fault',
|
||||
' filemap_read',
|
||||
' handle_mm_fault',
|
||||
' java/io/BufferedReader.fill',
|
||||
'7readLine',
|
||||
')yteArrayOutputStream.ensureCapacity',
|
||||
'>toByteArray',
|
||||
'>write',
|
||||
'(DataOutputStream.write',
|
||||
'>Int',
|
||||
'>UTF',
|
||||
'(InputStreamReader.read',
|
||||
'%lang/Integer.parseInt',
|
||||
'*String.<init>',
|
||||
'1decodeASCII',
|
||||
'1hashCode',
|
||||
'1length',
|
||||
'1substring',
|
||||
'0Latin1.hashCode',
|
||||
'7newString',
|
||||
'0UTF16.compress',
|
||||
'+ystem$2.decodeASCII',
|
||||
'0.arraycopy',
|
||||
'*ThreadLocal.get',
|
||||
'%nio/charset/CharsetDecoder.decode',
|
||||
')file/Files.readAllLines',
|
||||
'%util/ArrayList$ArrayListSpliterator.tryAdvance',
|
||||
'3.add',
|
||||
'4grow',
|
||||
'4sort',
|
||||
'/s.copyOf',
|
||||
'7Range',
|
||||
'1sort',
|
||||
'*Comparator$$Lambda$5.0x00007ffab90494b0.compare',
|
||||
'4.lambda$comparingInt$7b0bb60$1',
|
||||
'*HashMap$Node.<init>',
|
||||
'1.hash',
|
||||
'2newNode',
|
||||
'2put',
|
||||
'5Val',
|
||||
'2resize',
|
||||
'.Set.add',
|
||||
'*TimSort.binarySort',
|
||||
'2mergeAt',
|
||||
'7Collapse',
|
||||
'7ForceCollapse',
|
||||
'7Hi',
|
||||
'7Lo',
|
||||
'2sort',
|
||||
'*stream/AbstractPipeline.copyInto',
|
||||
'JWithCancel',
|
||||
'Bevaluate',
|
||||
'BwrapAndCopyInto',
|
||||
'1Collectors$$Lambda$7.0x00007ffab904a268.accept',
|
||||
'1DistinctOps$1$2.accept',
|
||||
'Aend',
|
||||
'1ReduceOps$3ReducingSink.accept',
|
||||
';ReduceOp.evaluateSequential',
|
||||
'3ferencePipeline$3$1.accept',
|
||||
'B.collect',
|
||||
'CforEachWithCancel',
|
||||
'1Sink$ChainedReference.end',
|
||||
'2liceOps$1$1.accept',
|
||||
'2ortedOps$RefSortingSink.accept',
|
||||
'Jend',
|
||||
'!long_disjoint_arraycopy',
|
||||
' ksys_read',
|
||||
' new_sync_read',
|
||||
' oop_arraycopy',
|
||||
' start_thread',
|
||||
'!un/nio/ch/ChannelInputStream.read',
|
||||
'+FileChannelImpl.read',
|
||||
'/DispatcherImpl.read',
|
||||
'B0',
|
||||
'+IOUtil.read',
|
||||
'6IntoNativeBuffer',
|
||||
'+Util.getTemporaryDirectBuffer',
|
||||
')s/StreamDecoder.implRead',
|
||||
'9read',
|
||||
'=Bytes',
|
||||
'+UTF_8$Decoder.decodeArrayLoop',
|
||||
'?Loop',
|
||||
' thread_native_entry',
|
||||
' vfs_read',
|
||||
'!oid ContiguousSpace::oop_since_save_marks_iterate<DefNewScanClosure>',
|
||||
'%OopOopIterateDispatch<DefNewScanClosure>::Table::oop_oop_iterate<InstanceKlass, narrowOop>',
|
||||
'fObjArrayKlass, narrowOop>',
|
||||
'AYoungerGenClosure>::Table::oop_oop_iterate<InstanceKlass, narrowOop>'
|
||||
];
|
||||
unpack(cpool);
|
||||
|
||||
n(3,584)
|
||||
f(635,1,1,178)
|
||||
u(1323)
|
||||
u(1428)
|
||||
u(516)
|
||||
u(188,70)
|
||||
u(76)
|
||||
f(84,7,2,68)
|
||||
f(12,8,2,63)
|
||||
u(60)
|
||||
u(52,36)
|
||||
f(204,11,3,2)
|
||||
n(284,7)
|
||||
u(292)
|
||||
f(300,13,1,2)
|
||||
n(308,4)
|
||||
f(316,11,4,24)
|
||||
f(324,12,6,2)
|
||||
n(332,9)
|
||||
f(348,13,5,4)
|
||||
f(356,12,4,2)
|
||||
n(364)
|
||||
u(340)
|
||||
f(484,12,2,3)
|
||||
u(476)
|
||||
f(68,10,3,23)
|
||||
f(412,11,2,16)
|
||||
u(380,15)
|
||||
f(372,13,1,3)
|
||||
n(388,2)
|
||||
n(396)
|
||||
n(404)
|
||||
n(428,5)
|
||||
f(436,14,1,2)
|
||||
n(444)
|
||||
u(420)
|
||||
f(452,11,3,3)
|
||||
u(468)
|
||||
f(460,13,1,2)
|
||||
f(492,11,2)
|
||||
u(452)
|
||||
u(468)
|
||||
f(276,10,2,4)
|
||||
u(236)
|
||||
u(244)
|
||||
u(260)
|
||||
u(268)
|
||||
u(252)
|
||||
f(500,16,1,3)
|
||||
f(500,17,1,2)
|
||||
u(276)
|
||||
u(236)
|
||||
u(244)
|
||||
u(260)
|
||||
u(268)
|
||||
u(252)
|
||||
f(92,8,2,3)
|
||||
u(20)
|
||||
u(36)
|
||||
u(28)
|
||||
f(44,12,1,2)
|
||||
f(540,5,2,108)
|
||||
u(532)
|
||||
u(524)
|
||||
u(556)
|
||||
u(548)
|
||||
u(148)
|
||||
u(140)
|
||||
u(132)
|
||||
u(116)
|
||||
u(108)
|
||||
f(1444,15,12,50)
|
||||
f(1452,16,1,22)
|
||||
f(124,17,2,20)
|
||||
f(156,18,9,8)
|
||||
f(228,19,2,2)
|
||||
n(508)
|
||||
n(605)
|
||||
u(773)
|
||||
u(757)
|
||||
u(789)
|
||||
u(573)
|
||||
u(741)
|
||||
u(613)
|
||||
u(629)
|
||||
u(621)
|
||||
f(579,18,2)
|
||||
f(1460,16,3,27)
|
||||
f(124,17,11,16)
|
||||
f(156,18,13,3)
|
||||
u(605)
|
||||
u(773)
|
||||
u(757)
|
||||
u(789)
|
||||
u(573)
|
||||
u(741)
|
||||
u(613)
|
||||
u(629)
|
||||
u(621)
|
||||
f(1468,15,3,46)
|
||||
f(124,16,19,27)
|
||||
f(100,17,10,4)
|
||||
n(156,13)
|
||||
f(228,18,1,2)
|
||||
n(605,10)
|
||||
u(773)
|
||||
u(757)
|
||||
u(789)
|
||||
u(573)
|
||||
u(741)
|
||||
u(613)
|
||||
u(629)
|
||||
f(621,26,1,9)
|
||||
f(713,1,9,405)
|
||||
u(697)
|
||||
u(705,229)
|
||||
f(1241,4,1,228)
|
||||
u(1177)
|
||||
u(1225)
|
||||
u(1185)
|
||||
u(1161)
|
||||
u(1169)
|
||||
u(1249,107)
|
||||
f(977,11,2,105)
|
||||
f(1233,12,2,103,2,0,0)
|
||||
u(657,13)
|
||||
f(674,14,4,9,8,0,0)
|
||||
f(866,15,1,3)
|
||||
u(866)
|
||||
f(906,15,3,5,4,0,0)
|
||||
u(906,3,2,0,0)
|
||||
u(922)
|
||||
f(1018,18,1,2)
|
||||
f(922,16,2)
|
||||
f(1201,13,2,90,2,0,0)
|
||||
u(1098,87,57,0,0)
|
||||
u(1074,87,59,0,0)
|
||||
u(1057,4)
|
||||
u(689)
|
||||
f(890,18,1,3)
|
||||
f(914,19,1,2)
|
||||
f(1082,16,2,83,59,1,0)
|
||||
f(682,17,41,2)
|
||||
n(1066,17)
|
||||
u(1050)
|
||||
f(1089,17,17,23,0,0,2)
|
||||
f(1265,14,23,3)
|
||||
u(1273)
|
||||
u(985)
|
||||
u(985)
|
||||
u(993)
|
||||
u(993)
|
||||
u(1009)
|
||||
u(1008)
|
||||
u(172)
|
||||
u(164)
|
||||
u(212)
|
||||
u(220)
|
||||
u(587)
|
||||
u(605)
|
||||
u(773)
|
||||
u(757)
|
||||
u(789)
|
||||
u(573)
|
||||
u(741)
|
||||
u(613)
|
||||
u(629)
|
||||
u(621)
|
||||
f(1257,10,3,121)
|
||||
u(1209)
|
||||
u(1257)
|
||||
u(1281)
|
||||
f(1001,14,1,117)
|
||||
u(1025)
|
||||
u(1153)
|
||||
u(1105,21,0,1,0)
|
||||
f(1034,18,13,4)
|
||||
u(1042)
|
||||
f(666,20,1,3)
|
||||
f(1315,18,3,4)
|
||||
f(1121,17,4,73)
|
||||
u(1113)
|
||||
u(1137,16)
|
||||
f(1034,20,14,2,1,0,0)
|
||||
u(1042)
|
||||
f(1145,19,2,57,0,2,1)
|
||||
f(1033,20,54,3)
|
||||
u(1042)
|
||||
u(666)
|
||||
f(1129,17,3,23)
|
||||
u(1113)
|
||||
u(1137)
|
||||
f(1033,20,18,5,1,0,0)
|
||||
u(1042)
|
||||
u(666)
|
||||
f(1218,14,5,3,1,0,0)
|
||||
u(1194,3,1,0,0)
|
||||
u(986,3,1,0,0)
|
||||
f(985,17,1,2)
|
||||
u(993)
|
||||
u(993)
|
||||
u(1009)
|
||||
u(1008)
|
||||
u(172)
|
||||
u(164)
|
||||
u(212)
|
||||
u(220)
|
||||
f(721,3,2,107)
|
||||
u(969)
|
||||
u(969)
|
||||
f(801,6,4,97)
|
||||
u(801,97,0,0,1)
|
||||
f(793,8,77,14)
|
||||
u(857,14,1,0,0)
|
||||
u(1393,14,1,0,0)
|
||||
f(1385,11,1,13)
|
||||
u(961,3)
|
||||
u(1417)
|
||||
u(1409)
|
||||
u(938)
|
||||
u(882)
|
||||
f(1401,12,3,10,2,0,0)
|
||||
u(1329,10,3,0,0)
|
||||
u(1329,10,3,0,0)
|
||||
u(1329,10,3,0,0)
|
||||
u(1337,10,3,1,0)
|
||||
f(1361,17,2,8,2,0,0)
|
||||
u(1361,8,2,0,0)
|
||||
u(1369,6)
|
||||
u(1345)
|
||||
u(1353)
|
||||
u(195)
|
||||
u(563)
|
||||
u(765)
|
||||
u(749)
|
||||
f(1301,26,1,5)
|
||||
u(1437)
|
||||
f(1309,28,1,4)
|
||||
u(781)
|
||||
f(645,30,1,3)
|
||||
u(653)
|
||||
f(1378,19,3,2)
|
||||
u(954)
|
||||
u(1291)
|
||||
f(874,8,2,6,5,0,0)
|
||||
u(874,6,5,0,0)
|
||||
u(930,6,5,0,0)
|
||||
f(985,6,6)
|
||||
u(985)
|
||||
u(993)
|
||||
u(993)
|
||||
u(1009)
|
||||
u(1008,6,0,1,4)
|
||||
f(172,12,2,4)
|
||||
u(164)
|
||||
u(212)
|
||||
u(220)
|
||||
u(587)
|
||||
f(605,17,1,3)
|
||||
u(773)
|
||||
u(757)
|
||||
u(789)
|
||||
u(573)
|
||||
u(741)
|
||||
u(613)
|
||||
u(629)
|
||||
u(621)
|
||||
f(729,3,3,69,0,2,1)
|
||||
f(817,4,21,2)
|
||||
u(1009)
|
||||
u(945)
|
||||
u(179)
|
||||
u(595)
|
||||
f(842,4,2,8,5,0,0)
|
||||
f(826,5,1,7,5,0,0)
|
||||
f(809,6,5,2)
|
||||
u(1008,2,0,0,1)
|
||||
f(849,4,2,38)
|
||||
u(849,38,0,0,2)
|
||||
f(834,6,12,23,21,0,0)
|
||||
f(826,7,6,17,15,1,0)
|
||||
f(898,6,17,3)
|
||||
|
||||
search();
|
||||
</script></body></html>
|
||||
|
Before Width: | Height: | Size: 64 KiB |
|
Before Width: | Height: | Size: 52 KiB |
|
Before Width: | Height: | Size: 57 KiB |
|
Before Width: | Height: | Size: 135 KiB |
|
Before Width: | Height: | Size: 68 KiB |
|
Before Width: | Height: | Size: 116 KiB |
|
Before Width: | Height: | Size: 27 KiB |
|
Before Width: | Height: | Size: 73 KiB |
|
Before Width: | Height: | Size: 96 KiB |
|
Before Width: | Height: | Size: 134 KiB |
|
Before Width: | Height: | Size: 198 KiB |
|
Before Width: | Height: | Size: 236 KiB |
|
Before Width: | Height: | Size: 106 KiB |
|
Before Width: | Height: | Size: 69 KiB |
|
Before Width: | Height: | Size: 271 KiB |
|
Before Width: | Height: | Size: 166 KiB |
|
Before Width: | Height: | Size: 19 KiB |
52
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,52 +0,0 @@
|
||||
---
|
||||
name: "🐛 Bug Report"
|
||||
description: Report a bug
|
||||
title: "(short issue description)"
|
||||
labels: [bug]
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the bug
|
||||
description: What is the problem? A clear and concise description of the bug.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: behavior
|
||||
attributes:
|
||||
label: Expected vs. actual behavior
|
||||
description: |
|
||||
What did you expect to happen? What happened instead?
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
label: Reproduction Steps
|
||||
description: |
|
||||
Step-by-step instructions how to reproduce the issue. Attach a code sample if available.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: context
|
||||
attributes:
|
||||
label: Additional Information/Context
|
||||
description: |
|
||||
Anything else that might be relevant for troubleshooting this bug: profiles, screenshots, etc.
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: Async-profiler version
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: environment
|
||||
attributes:
|
||||
label: Environment details
|
||||
description: |
|
||||
OS name and version, JDK version, CPU architecture. Is an application running in a container?
|
||||
validations:
|
||||
required: false
|
||||
6
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,6 +0,0 @@
|
||||
---
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: 💬 General Question
|
||||
url: https://github.com/async-profiler/async-profiler/discussions
|
||||
about: Please ask and answer questions as a discussion thread
|
||||
14
.github/ISSUE_TEMPLATE/documentation.yml
vendored
@@ -1,14 +0,0 @@
|
||||
---
|
||||
name: "📕 Documentation Issue"
|
||||
description: Report an issue in the profiler documentation
|
||||
title: "(short issue description)"
|
||||
labels: [documentation]
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the issue
|
||||
description: A clear and concise description of the issue.
|
||||
validations:
|
||||
required: true
|
||||
39
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@@ -1,39 +0,0 @@
|
||||
---
|
||||
name: 🚀 Feature Request
|
||||
description: Suggest an idea for this project
|
||||
title: "(short issue description)"
|
||||
labels: [enhancement]
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the feature
|
||||
description: A clear and concise description of the feature you are proposing.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: use-case
|
||||
attributes:
|
||||
label: Use Case
|
||||
description: |
|
||||
Why do you need this feature? For example: "I'm always frustrated when..."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: Proposed Solution
|
||||
description: |
|
||||
Suggest how to implement the addition or change. Provide references to alternative solutions, if any.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: ack
|
||||
attributes:
|
||||
label: Acknowledgements
|
||||
options:
|
||||
- label: I may be able to implement this feature request
|
||||
required: false
|
||||
- label: This feature might incur a breaking change
|
||||
required: false
|
||||
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,17 +0,0 @@
|
||||
### Description
|
||||
|
||||
|
||||
### Related issues
|
||||
|
||||
|
||||
### Motivation and context
|
||||
|
||||
|
||||
### How has this been tested?
|
||||
|
||||
|
||||
---
|
||||
|
||||
By submitting this pull request, I confirm that my contribution is made under the terms of the [Apache 2.0 license].
|
||||
|
||||
[Apache 2.0 license]: https://www.apache.org/licenses/LICENSE-2.0
|
||||
21
.github/workflows/linters.yml
vendored
@@ -1,21 +0,0 @@
|
||||
name: lint
|
||||
|
||||
on:
|
||||
- push
|
||||
- pull_request
|
||||
|
||||
jobs:
|
||||
license-header:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Check license headers
|
||||
uses: apache/skywalking-eyes/header@v0.6.0
|
||||
markdown:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install prettier
|
||||
run: |
|
||||
npm install -g prettier@3.4.2
|
||||
make check-md
|
||||
292
.github/workflows/test-and-publish-nightly.yml
vendored
@@ -1,292 +0,0 @@
|
||||
name: CI
|
||||
|
||||
on: # We are very liberal in terms of triggering builds. This should be revisited if we start seeing a lot of queueing
|
||||
- push
|
||||
- pull_request
|
||||
|
||||
env:
|
||||
java_default_distribution: corretto
|
||||
java_default_version: 11
|
||||
|
||||
jobs:
|
||||
build-jars:
|
||||
runs-on: ubuntu-latest
|
||||
name: Build JARs
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Build JARs
|
||||
run: make jar
|
||||
- name: Upload JARs
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: async-profiler-jars
|
||||
path: build/jar/*
|
||||
if-no-files-found: error
|
||||
build-and-upload-binaries:
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- runson:
|
||||
display: linux-arm64
|
||||
name: ubuntu-24.04-arm
|
||||
image: "public.ecr.aws/async-profiler/asprof-builder-arm:latest"
|
||||
- runson:
|
||||
display: linux-x64
|
||||
name: ubuntu-latest
|
||||
image: public.ecr.aws/async-profiler/asprof-builder-x86:latest
|
||||
- runson:
|
||||
display: macos
|
||||
name: macos-15
|
||||
runs-on: ${{ matrix.runson.name }}
|
||||
container:
|
||||
image: ${{ matrix.image }}
|
||||
volumes: ${{ fromJSON(matrix.volumes || '[]') }}
|
||||
name: "Build and unit test (${{ matrix.runson.display }})"
|
||||
steps:
|
||||
- name: Run container setup
|
||||
run: "[ ! -f /root/setup.sh ] || /root/setup.sh"
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
distribution: ${{ matrix.java-distribution || env.java_default_distribution }}
|
||||
java-version: ${{ matrix.java-version || env.java_default_version }}
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Build and unit test
|
||||
id: build
|
||||
run: |
|
||||
set -x
|
||||
HASH=${GITHUB_SHA:0:7}
|
||||
case "${{ matrix.runson.display }}" in
|
||||
macos*)
|
||||
brew install gcovr
|
||||
make COMMIT_TAG=$HASH FAT_BINARY=true release coverage -j
|
||||
;;
|
||||
*)
|
||||
make COMMIT_TAG=$HASH CC=/usr/local/musl/bin/musl-gcc release coverage -j
|
||||
echo "debug_archive=$(find . -type f -name "async-profiler-*-debug*" -exec basename {} \;)" >> $GITHUB_OUTPUT
|
||||
;;
|
||||
esac
|
||||
echo "archive=$(find . -type f -name "async-profiler-*" -not -name "*-debug*" -exec basename {} \;)" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Set artifact name
|
||||
id: set_artifact_name
|
||||
run: echo "artifact_name=async-profiler-${{ matrix.runson.display }}-${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Upload binaries
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ steps.set_artifact_name.outputs.artifact_name }}
|
||||
path: ${{ steps.build.outputs.archive }}
|
||||
if-no-files-found: error
|
||||
- name: Upload debug info
|
||||
uses: actions/upload-artifact@v4
|
||||
if: matrix.runson.display != 'macos'
|
||||
with:
|
||||
name: ${{ steps.set_artifact_name.outputs.artifact_name }}-debug
|
||||
path: ${{ steps.build.outputs.debug_archive }}
|
||||
if-no-files-found: error
|
||||
- name: Upload coverage report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-coverage-${{ matrix.runson.display }}
|
||||
path: build/test/coverage/
|
||||
if-no-files-found: error
|
||||
integration-tests:
|
||||
needs: build-and-upload-binaries
|
||||
strategy:
|
||||
matrix:
|
||||
runson:
|
||||
- display: linux-x64
|
||||
# Using "latest" here as the build and test will any ways run inside a container which we control
|
||||
name: ubuntu-latest
|
||||
java-version: [11, 17, 21, 24]
|
||||
java-distribution: [corretto]
|
||||
image: [public.ecr.aws/async-profiler/asprof-builder-x86:latest]
|
||||
include:
|
||||
- runson:
|
||||
display: macos-arm64
|
||||
name: macos-15
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
# Not using container for mac-os as we have images only for linux
|
||||
image: ""
|
||||
# ARM MacOS should take fat binaries built on ARM
|
||||
asprof-binaries-job: macos
|
||||
- runson:
|
||||
display: macos-x64
|
||||
name: macos-13
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
architecture: x64
|
||||
image: ""
|
||||
# x64 MacOS should take fat binaries built on ARM
|
||||
asprof-binaries-job: macos
|
||||
- runson:
|
||||
display: linux-arm64
|
||||
# There is no "latest" tag available (yet) as ARM runners are still in public preview
|
||||
name: ubuntu-24.04-arm
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
image: public.ecr.aws/async-profiler/asprof-builder-arm:latest
|
||||
- runson:
|
||||
display: alpine
|
||||
name: ubuntu-latest
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
asprof-binaries-job: linux-x64
|
||||
image: public.ecr.aws/async-profiler/asprof-builder-alpine:corretto-11
|
||||
- runson:
|
||||
display: amazonlinux2
|
||||
name: ubuntu-latest
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
image: public.ecr.aws/async-profiler/asprof-builder-amazonlinux:2
|
||||
# GHA provides Node.js by attaching a volume to the container. The container path is
|
||||
# '/__e/node20', and it's not writable unless we override it via 'container.volumes'.
|
||||
volumes: '["/tmp/node20:/__e/node20"]'
|
||||
asprof-binaries-job: linux-x64
|
||||
- runson:
|
||||
display: amazonlinux2023
|
||||
name: ubuntu-latest
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
image: public.ecr.aws/async-profiler/asprof-builder-amazonlinux:2023
|
||||
asprof-binaries-job: linux-x64
|
||||
runs-on: ${{ matrix.runson.name }}
|
||||
container:
|
||||
image: ${{ matrix.image }}
|
||||
options: --privileged
|
||||
volumes: ${{ fromJSON(matrix.volumes || '[]') }}
|
||||
name: "Integration test (${{ matrix.runson.display }}, ${{ matrix.java-distribution }} ${{ matrix.java-version }})"
|
||||
steps:
|
||||
- name: Run container setup
|
||||
run: "[ ! -f /root/setup.sh ] || /root/setup.sh"
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@v4
|
||||
# https://github.com/actions/setup-java/issues/678#issuecomment-2446279753
|
||||
if: matrix.runson.display != 'alpine'
|
||||
with:
|
||||
distribution: ${{ matrix.java-distribution }}
|
||||
java-version: ${{ matrix.java-version }}
|
||||
architecture: ${{ matrix.architecture }}
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Set variables
|
||||
id: set_variables
|
||||
run: |
|
||||
echo "short_sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
echo "artifact_name=async-profiler-${{ matrix.asprof-binaries-job || matrix.runson.display }}-${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Download async-profiler release artifact
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: ${{ steps.set_variables.outputs.artifact_name }}
|
||||
path: async_profiler_release
|
||||
- name: Download async-profiler JAR artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: async-profiler-jars
|
||||
path: jar_artifacts
|
||||
- name: Extract async-profiler artifact
|
||||
id: extract_artifact
|
||||
run: |
|
||||
release_archive=$(basename $(find async_profiler_release -type f -iname "async-profiler-*" ))
|
||||
case "${{ matrix.runson.name }}" in
|
||||
macos*)
|
||||
unzip async_profiler_release/$release_archive
|
||||
;;
|
||||
*)
|
||||
tar xvf async_profiler_release/$release_archive
|
||||
;;
|
||||
esac
|
||||
echo "jars_directory=jar_artifacts" >> $GITHUB_OUTPUT
|
||||
echo "release_directory=$(basename $(find . -type d -iname "async-profiler-*" ))" >> $GITHUB_OUTPUT
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
mkdir -p build/jar
|
||||
cp ${{ steps.extract_artifact.outputs.jars_directory }}/* build/jar
|
||||
make build/test.jar
|
||||
cp -r ${{ steps.extract_artifact.outputs.release_directory }}/bin build
|
||||
cp -r ${{ steps.extract_artifact.outputs.release_directory }}/lib build
|
||||
make test-java -j
|
||||
- name: Upload integration test logs
|
||||
uses: actions/upload-artifact@v4
|
||||
# Always upload, especially after test failure
|
||||
if: always()
|
||||
with:
|
||||
name: integration-test-logs-${{ matrix.runson.display }}-${{ matrix.java-version }}-${{ steps.set_variables.outputs.short_sha }}
|
||||
path: |
|
||||
build/test/logs/
|
||||
hs_err*.log
|
||||
publish-only-on-push:
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
name: publish (nightly)
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-jars, integration-tests]
|
||||
steps:
|
||||
- name: Download async-profiler binaries and jars
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: 'async-profiler-*'
|
||||
merge-multiple: 'true'
|
||||
- name: Delete previous release and publish new release
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const fs = require('fs').promises;
|
||||
const commonOptions = {
|
||||
owner: "async-profiler",
|
||||
repo: "async-profiler",
|
||||
};
|
||||
let previousRelease = undefined;
|
||||
try {
|
||||
previousRelease = await github.rest.repos.getReleaseByTag({
|
||||
...commonOptions,
|
||||
tag: "nightly",
|
||||
});
|
||||
} catch (e) {
|
||||
console.log("No previous nightly release");
|
||||
// ignore, there was no previous nightly release
|
||||
}
|
||||
if (previousRelease !== undefined) {
|
||||
// delete previous release and nightly tag
|
||||
await github.rest.repos.deleteRelease({
|
||||
...commonOptions,
|
||||
release_id: previousRelease.data.id,
|
||||
});
|
||||
await github.rest.git.deleteRef({...commonOptions, ref: "tags/nightly"});
|
||||
}
|
||||
// create draft release
|
||||
const newReleaseId = (await github.rest.repos.createRelease({
|
||||
...commonOptions,
|
||||
tag_name: "nightly",
|
||||
target_commitish: "${{ github.sha }}",
|
||||
name: "Nightly builds",
|
||||
body: "Async-profiler binaries published automatically from the latest sources in `master` upon a successful build.",
|
||||
prerelease: true,
|
||||
draft: true,
|
||||
})).data.id;
|
||||
// upload binaries and jars to draft release
|
||||
for (const archiveName of await fs.readdir(process.cwd())) {
|
||||
await github.rest.repos.uploadReleaseAsset({
|
||||
...commonOptions,
|
||||
release_id: newReleaseId,
|
||||
name: archiveName,
|
||||
data: await fs.readFile(archiveName),
|
||||
});
|
||||
}
|
||||
// publish release
|
||||
await github.rest.repos.updateRelease({
|
||||
...commonOptions,
|
||||
release_id: newReleaseId,
|
||||
draft: false,
|
||||
});
|
||||
3
.gitignore
vendored
@@ -1,10 +1,7 @@
|
||||
/build/
|
||||
/nbproject/
|
||||
/out/
|
||||
/target/
|
||||
/.idea/
|
||||
/test/*.class
|
||||
.vscode
|
||||
*.iml
|
||||
/src/api/**/*.class
|
||||
.gdb_history
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
header:
|
||||
- paths:
|
||||
- 'src/jattach'
|
||||
license:
|
||||
content: |
|
||||
Copyright The jattach authors
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
comment: on-failure
|
||||
|
||||
- paths:
|
||||
- 'src'
|
||||
- 'test'
|
||||
paths-ignore:
|
||||
- 'src/jattach'
|
||||
- 'src/res'
|
||||
license:
|
||||
content: |
|
||||
Copyright The async-profiler authors
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
comment: on-failure
|
||||
11
.travis.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
language: cpp
|
||||
|
||||
dist: precise
|
||||
|
||||
sudo: required
|
||||
|
||||
before_install:
|
||||
- sudo apt-get install default-jdk
|
||||
- sudo bash -c 'echo 1 > /proc/sys/kernel/perf_event_paranoid'
|
||||
|
||||
script: make && make test
|
||||
285
CHANGELOG.md
@@ -1,300 +1,21 @@
|
||||
# Changelog
|
||||
|
||||
## [4.0] - 2025-04-08
|
||||
|
||||
### Features
|
||||
- #895, #905: `jfrconv` binary and numerous converter enhancements
|
||||
- #944: Interactive Heatmap
|
||||
- #1064: Native memory leak profiler
|
||||
- #1002: An option to display instruction addresses
|
||||
- #1007: Optimize wall clock profiling
|
||||
- #1073: Productize VMStructs-based stack walker: `--cstack vm/vmx`
|
||||
- #1169: C API for accessing thread-local profiling context
|
||||
|
||||
### Improvements
|
||||
- #923: Support JDK 23+
|
||||
- #952: Solve musl and glibc compatibility issues; link `libstdc++` statically
|
||||
- #955: `--libpath` option to specify path to `libasyncProfiler.so` in a container
|
||||
- #1018: `--grain` converter option to coarsen flame graphs
|
||||
- #1046: `--nostop` option to continue profiling outside `--begin`/`--end` window
|
||||
- #1178: `--inverted` option to flip flame graphs vertically
|
||||
- #1009: Allows collecting allocation and live object traces at the same time
|
||||
- #925: An option to accumulate JFR events in memory instead of flushing to a file
|
||||
- #929: Load symbols from debuginfod cache
|
||||
- #982: Sample contended locks by overflowing interval bucket
|
||||
- #993: Filter native frames in allocation profile
|
||||
- #896: FlameGraph: `Alt+Click` to remove stacks
|
||||
- #1097: FlameGraph: `N`/`Shift+N` to navigate through search results
|
||||
- #1182: Retain by-thread grouping when reversing FlameGraph
|
||||
- #1167: Log when no samples are collected
|
||||
- #1044: Fall back to `ctimer` for CPU profiling when perf_events are unavailable
|
||||
- #1068: Count missed samples when estimating total CPU time in `ctimer` mode
|
||||
- #1142: Use counter-timer register for timestamps on ARM64
|
||||
- #1123: Support `clock=tsc` without a JVM
|
||||
- #1070: Demangle Rust v0 symbols
|
||||
- #1007: Use `ExecutionSample` event for CPU profiling and `WallClockSample` for Wall clock profiling
|
||||
- #1011: Obtain `can_generate_sampled_object_alloc_events` JVMTI capability only when needed
|
||||
- #1013: Intercept java.util.concurrent locks more efficiently
|
||||
- #759: Discover available profiling signal automatically
|
||||
- #884: Record event timestamps early
|
||||
- #885: Print error message if JVM fails to load libasyncProfiler
|
||||
- #892: Resolve tracepoint id in `asprof`
|
||||
- Suppress dynamic attach warning on JDK 21+
|
||||
|
||||
### Bug fixes
|
||||
- #1143: Crash on macOS when using thread filter
|
||||
- #1125: Fixed parsing concurrently loaded libraries
|
||||
- #1095: jfr print fails when a recording has empty pools
|
||||
- #1084: Fixed Logging related races
|
||||
- #1074: Parse both .rela.dyn and .rela.plt sections
|
||||
- #1003: Support both tracefs and debugfs for kernel tracepoints
|
||||
- #986: Profiling output respects loglevel
|
||||
- #981: Avoid JVM crash by deleting JNI refs after `GetMethodDeclaringClass`
|
||||
- #934: Fix crash on Zing in a native thread
|
||||
- #843: Fix race between parsing and concurrent unloading of shared libraries
|
||||
- #1147, #1151: Deadlocks with jemalloc and tcmalloc profilers
|
||||
- Stack walking fixes for ARM64
|
||||
- Converter fixes for `jfrsync` profiles
|
||||
- Fixed parsing non-PIC executables and shared objects with non-standard section layout
|
||||
- Fixed recursion in `pthread_create` when using native profiling API
|
||||
- Fixed crashes on Alpine when profiling native apps
|
||||
- Fixed warnings with `-Xcheck:jni`
|
||||
- Fixed "Unsupported JVM" on OpenJ9 JDK 21
|
||||
- Fixed DefineClass crash on OpenJ9
|
||||
- JfrReader should handle custom events properly
|
||||
- Handle truncated JFRs
|
||||
|
||||
### Project Infrastructure
|
||||
- Restructure and update documentation
|
||||
- Implement test framework; add new integration tests
|
||||
- Unit test framework for C++ code
|
||||
- Run CI on all supported platforms
|
||||
- Test multiple JDK versions in CI
|
||||
- Add GHA to validate license headers
|
||||
- Add Markdown checker and formatter
|
||||
- Add Issue and Pull Request templates
|
||||
- Add Contributing Guidelines and Code of Conduct
|
||||
- Run static analyzer and fix found issues (#1034, #1039, #1049, #1051, #1098)
|
||||
- Provide Dockerfile for building async-profiler release packages
|
||||
- Publish nightly builds automatically
|
||||
|
||||
## [3.0] - 2024-01-20
|
||||
|
||||
### Features
|
||||
- #724: Binary launcher `asprof`
|
||||
- #751: Profile non-Java processes
|
||||
- #795: AsyncGetCallTrace replacement
|
||||
- #719: Classify execution samples into categories in JFR converter
|
||||
- #855: `ctimer` mode for accurate profiling without perf_events
|
||||
- #740: Profile CPU + Wall clock together
|
||||
- #736: Show targets of vtable/itable calls
|
||||
- #777: Show JIT compilation task
|
||||
- #644: RISC-V port
|
||||
- #770: LoongArch64 port
|
||||
|
||||
### Improvements
|
||||
- #733: Make the same `libasyncProfiler` work with both glibc and musl
|
||||
- #734: Support raw PMU event descriptors
|
||||
- #759: Configure alternative profiling signal
|
||||
- #761: Parse dynamic linking structures
|
||||
- #723: `--clock` option to select JFR timestamp source
|
||||
- #750: `--jfrsync` may specify a list of JFR events
|
||||
- #849: Parse concatenated multi-chunk JFRs
|
||||
- #833: Time-to-safepoint JFR event
|
||||
- #832: Normalize names of hidden classes / lambdas
|
||||
- #864: Reduce size of HTML Flame Graph
|
||||
- #783: Shutdown asprof gracefully on SIGTERM
|
||||
- Better demangling of C++ and Rust symbols
|
||||
- DWARF unwinding for ARM64
|
||||
- `JfrReader` can parse in-memory buffer
|
||||
- Support custom events in `JfrReader`
|
||||
- An option to read JFR file by chunks
|
||||
- Record `GCHeapSummary` events in JFR
|
||||
|
||||
### Bug fixes
|
||||
- Workaround macOS crashes in SafeFetch
|
||||
- Fixed attach to OpenJ9 on macOS
|
||||
- Support `UseCompressedObjectHeaders` aka Lilliput
|
||||
- Fixed allocation profiling on JDK 20.0.x
|
||||
- Fixed context-switches profiling
|
||||
- Prefer ObjectSampler to TLAB hooks for allocation profiling
|
||||
- Improved accuracy of ObjectSampler in `--total` mode
|
||||
- Make Flame Graph status line and search results always visible
|
||||
- `loop` and `timeout` options did not work in some modes
|
||||
- Restart interrupted poll/epoll_wait syscalls
|
||||
- Fixed stack unwinding issues on ARM64
|
||||
- Workaround for stale jmethodIDs
|
||||
- Calculate ELF base address correctly
|
||||
- Do not dump redundant threads in a JFR chunk
|
||||
- `check` action prints result to a file
|
||||
- Annotate JFR unit types with `@ContentType`
|
||||
|
||||
## [2.9] - 2022-11-27
|
||||
|
||||
### Features
|
||||
- Java Heap leak profiler
|
||||
- `meminfo` command to print profiler's memory usage
|
||||
- Profiler API with embedded agent as a Maven artifact
|
||||
|
||||
### Improvements
|
||||
- `--include`/`--exclude` options in the FlameGraph converter
|
||||
- `--simple` and `--dot` options in jfr2flame converter
|
||||
- An option for agressive recovery of `[unknown_Java]` stack traces
|
||||
- Do not truncate signatures in collapsed format
|
||||
- Display inlined frames under a runtime stub
|
||||
|
||||
### Bug fixes
|
||||
- Profiler did not work with Homebrew JDK
|
||||
- Fixed allocation profiling on Zing
|
||||
- Various `jfrsync` fixes
|
||||
- Symbol parsing fixes
|
||||
- Attaching to a container on Linux 3.x could fail
|
||||
|
||||
## [2.8.3] - 2022-07-16
|
||||
|
||||
### Improvements
|
||||
- Support virtualized ARM64 macOS
|
||||
- A switch to generate auxiliary events by async-profiler or FlightRecorder in jfrsync mode
|
||||
|
||||
### Bug fixes
|
||||
- Could not recreate perf_events after the first failure
|
||||
- Handle different versions of Zing properly
|
||||
- Do not call System.loadLibrary, when libasyncProfiler is preloaded
|
||||
|
||||
## [2.8.2] - 2022-07-13
|
||||
|
||||
### Bug fixes
|
||||
- The same .so works with glibc and musl
|
||||
- dlopen hook did not work on Arch Linux
|
||||
- Fixed JDK 7 crash
|
||||
- Fixed CPU profiling on Zing
|
||||
|
||||
### Changes
|
||||
- Mark interpreted frames with `_[0]` in collapsed output
|
||||
- Double click selects a method name on a flame graph
|
||||
|
||||
## [2.8.1] - 2022-06-10
|
||||
|
||||
### Improvements
|
||||
- JFR to pprof converter (contributed by @NeQuissimus)
|
||||
- JFR converter improvements: time range, collapsed output, pattern highlighting
|
||||
- `%n` pattern in file names; limit number of output files
|
||||
- `--lib` to customize profiler library path in a container
|
||||
- `profiler.sh list` command now works without PID
|
||||
|
||||
### Bug fixes
|
||||
- Fixed crashes related to continuous profiling
|
||||
- Fixed Alpine/musl compatibility issues
|
||||
- Fixed incomplete collapsed output due to weird locale settings
|
||||
- Workaround for JDK-8185348
|
||||
|
||||
## [2.8] - 2022-05-09
|
||||
|
||||
### Features
|
||||
- Mark top methods as interpreted, compiled (C1/C2), or inlined
|
||||
- JVM TI based allocation profiling for JDK 11+
|
||||
- Embedded HTTP management server
|
||||
|
||||
### Improvements
|
||||
- Re-implemented stack recovery for better reliability
|
||||
- Add `loglevel` argument
|
||||
- Do not mmap perf page in `--all-user` mode
|
||||
- Distinguish runnable/sleeping threads in OpenJ9 wall-clock profiler
|
||||
- `--cpu` converter option to extract CPU profile from the wall-clock output
|
||||
|
||||
## [2.7] - 2022-02-14
|
||||
|
||||
### Features
|
||||
- Experimental support for OpenJ9 VM
|
||||
- DWARF stack unwinding
|
||||
|
||||
### Improvements
|
||||
- Better handling of VM threads (fixed missing JIT threads)
|
||||
- More reliable recovery from `not_walkable` AGCT failures
|
||||
- Do not accept unknown agent arguments
|
||||
|
||||
## [2.6] - 2022-01-09
|
||||
|
||||
### Features
|
||||
- Continuous profiling; `loop` and `timeout` options
|
||||
|
||||
### Improvements
|
||||
- Reliability improvements: avoid certain crashes and deadlocks
|
||||
- Smaller and faster agent library
|
||||
- Minor `jfr` and `jfrsync` enhancements (see the commit log)
|
||||
|
||||
## [2.5.1] - 2021-12-05
|
||||
|
||||
### Bug fixes
|
||||
- Prevent early unloading of libasyncProfiler.so
|
||||
- Read kernel symbols only for perf_events
|
||||
- Escape backslashes in flame graphs
|
||||
- Avoid duplicate categories in `jfrsync` mode
|
||||
- Fixed stack overflow in RedefineClasses
|
||||
- Fixed deadlock when flushing JFR
|
||||
|
||||
### Improvements
|
||||
- Support OpenJDK C++ Interpreter (aka Zero)
|
||||
- Allow reading incomplete JFR recordings
|
||||
|
||||
## [2.5] - 2021-10-01
|
||||
|
||||
### Features
|
||||
- macOS/ARM64 (aka Apple M1) port
|
||||
- PPC64LE port (contributed by @ghaug)
|
||||
- Profile low-privileged processes with perf_events (contributed by @Jongy)
|
||||
- Raw PMU events; kprobes & uprobes
|
||||
- Dump results in the middle of profiling session
|
||||
- Chunked JFR; support JFR files larger than 2 GB
|
||||
- Integrate async-profiler events with JDK Flight Recordings
|
||||
|
||||
### Improvements
|
||||
- Use RDTSC for JFR timestamps when possible
|
||||
- Show line numbers and bci in Flame Graphs
|
||||
- jfr2flame can produce Allocation and Lock flame graphs
|
||||
- Flame Graph title depends on the event and `--total`
|
||||
- Include profiler logs and native library list in JFR output
|
||||
- Lock profiling no longer requires JVM symbols
|
||||
- Better container support
|
||||
- Native function profiler can count the specified argument
|
||||
- An option to group threads by scheduling policy
|
||||
- An option to prepend library name to native symbols
|
||||
|
||||
### Notes
|
||||
- macOS build is provided as a fat binary that works both on x86-64 and ARM64
|
||||
- 32-bit binaries are no longer shipped. It is still possible to build them from sources
|
||||
- Dropped JDK 6 support (may still work though)
|
||||
|
||||
## [2.0] - 2021-03-14
|
||||
## [2.0-b1] - Early access
|
||||
|
||||
### Features
|
||||
- Profile multiple events together (cpu + alloc + lock)
|
||||
- HTML 5 Flame Graphs: faster rendering, smaller size
|
||||
- JFR v2 output format, compatible with FlightRecorder API
|
||||
- JFR to Flame Graph converter
|
||||
- Automatically turn profiling on/off at `--begin`/`--end` functions
|
||||
- Time-to-safepoint profiling: `--ttsp`
|
||||
- Time-to-safepoint profiling
|
||||
|
||||
### Improvements
|
||||
- Unlimited frame buffer. Removed `-b` option and 64K stack traces limit
|
||||
- Additional JFR events: OS, CPU, and JVM information; CPU load
|
||||
- Record bytecode indices / line numbers
|
||||
- Native stack traces for Java events
|
||||
- Improved CLI experience
|
||||
- Better error handling; an option to log warnings/errors to a dedicated stream
|
||||
- Reduced the amount of unknown stack traces
|
||||
- Record CPU load in JFR format
|
||||
|
||||
### Changes
|
||||
- Removed non-ASL code. No more CDDL license
|
||||
|
||||
## [1.8.4] - 2021-02-24
|
||||
|
||||
### Improvements
|
||||
- Smaller and faster agent library
|
||||
|
||||
### Bug fixes
|
||||
- Fixed JDK 7 crash during wall-clock profiling
|
||||
|
||||
## [1.8.3] - 2021-01-06
|
||||
|
||||
### Improvements
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
## Code of Conduct
|
||||
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
|
||||
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
|
||||
opensource-codeofconduct@amazon.com with any additional questions or comments.
|
||||
@@ -1,59 +0,0 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
|
||||
documentation, we greatly value feedback and contributions from our community.
|
||||
|
||||
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
|
||||
information to effectively respond to your bug report or contribution.
|
||||
|
||||
|
||||
## Security issue notifications
|
||||
If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
|
||||
|
||||
|
||||
## Reporting Bugs/Feature Requests
|
||||
|
||||
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
|
||||
|
||||
When filing an issue, please check [existing open](https://github.com/async-profiler/async-profiler/issues), or [recently closed](https://github.com/async-profiler/async-profiler/issues?q=is%3Aissue+is%3Aclosed), issues to make sure somebody else hasn't already
|
||||
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
|
||||
|
||||
* A reproducible test case or series of steps
|
||||
* The version of our code being used
|
||||
* Any modifications you've made relevant to the bug
|
||||
* Anything unusual about your environment or deployment
|
||||
|
||||
|
||||
## Contributing via Pull Requests
|
||||
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
|
||||
|
||||
1. You are working against the latest source on the *master* branch.
|
||||
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
|
||||
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
|
||||
|
||||
To send us a pull request, please:
|
||||
|
||||
1. Fork the repository.
|
||||
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
|
||||
3. Ensure local tests pass.
|
||||
4. Commit to your fork using clear commit messages.
|
||||
5. Send us a pull request, answering any default questions in the pull request interface.
|
||||
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
|
||||
|
||||
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
|
||||
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
|
||||
## Finding contributions to work on
|
||||
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/async-profiler/async-profiler/labels/help%20wanted) issues is a great place to start.
|
||||
|
||||
|
||||
## Code of Conduct
|
||||
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
|
||||
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
|
||||
opensource-codeofconduct@amazon.com with any additional questions or comments.
|
||||
|
||||
|
||||
## Licensing
|
||||
|
||||
See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
|
||||
312
Makefile
@@ -1,286 +1,112 @@
|
||||
PROFILER_VERSION ?= 4.0
|
||||
PROFILER_VERSION=2.0-b1
|
||||
JATTACH_VERSION=1.5
|
||||
JAVAC_RELEASE_VERSION=6
|
||||
|
||||
ifeq ($(COMMIT_TAG),true)
|
||||
PROFILER_VERSION := $(PROFILER_VERSION)-$(shell git rev-parse --short=8 HEAD)
|
||||
else ifneq ($(COMMIT_TAG),)
|
||||
PROFILER_VERSION := $(PROFILER_VERSION)-$(COMMIT_TAG)
|
||||
endif
|
||||
|
||||
COMMA=,
|
||||
PACKAGE_NAME=async-profiler-$(PROFILER_VERSION)-$(OS_TAG)-$(ARCH_TAG)
|
||||
PACKAGE_DIR=/tmp/$(PACKAGE_NAME)
|
||||
DEBUG_PACKAGE_NAME=$(PACKAGE_NAME)-debug
|
||||
DEBUG_PACKAGE_DIR=$(PACKAGE_DIR)-debug
|
||||
|
||||
ASPROF=bin/asprof
|
||||
JFRCONV=bin/jfrconv
|
||||
LIB_PROFILER=lib/libasyncProfiler.$(SOEXT)
|
||||
LIB_PROFILER_DEBUG=libasyncProfiler.$(SOEXT).debug
|
||||
ASPROF_HEADER=include/asprof.h
|
||||
API_JAR=jar/async-profiler.jar
|
||||
CONVERTER_JAR=jar/jfr-converter.jar
|
||||
TEST_JAR=test.jar
|
||||
LIB_PROFILER=libasyncProfiler.$(SOEXT)
|
||||
LIB_PROFILER_SO=libasyncProfiler.so
|
||||
JATTACH=jattach
|
||||
API_JAR=async-profiler.jar
|
||||
CONVERTER_JAR=converter.jar
|
||||
|
||||
CC ?= gcc
|
||||
CXX ?= g++
|
||||
STRIP ?= strip
|
||||
OBJCOPY ?= objcopy
|
||||
|
||||
ifneq ($(CROSS_COMPILE),)
|
||||
CC := $(CROSS_COMPILE)gcc
|
||||
CXX := $(CROSS_COMPILE)g++
|
||||
STRIP := $(CROSS_COMPILE)strip
|
||||
OBJCOPY := $(CROSS_COMPILE)objcopy
|
||||
endif
|
||||
|
||||
CFLAGS_EXTRA ?=
|
||||
CXXFLAGS_EXTRA ?=
|
||||
CFLAGS=-O3 -fno-exceptions $(CFLAGS_EXTRA)
|
||||
CXXFLAGS=-O3 -fno-exceptions -fno-omit-frame-pointer -fvisibility=hidden -std=c++11 $(CXXFLAGS_EXTRA)
|
||||
CPPFLAGS=
|
||||
DEFS=-DPROFILER_VERSION=\"$(PROFILER_VERSION)\"
|
||||
INCLUDES=-I$(JAVA_HOME)/include -Isrc/helper
|
||||
CFLAGS=-O3 -fno-omit-frame-pointer -fvisibility=hidden
|
||||
CXXFLAGS=-O3 -fno-omit-frame-pointer -fvisibility=hidden
|
||||
INCLUDES=-I$(JAVA_HOME)/include
|
||||
LIBS=-ldl -lpthread
|
||||
MERGE=true
|
||||
GCOV ?= gcov
|
||||
|
||||
JAVAC=$(JAVA_HOME)/bin/javac
|
||||
JAR=$(JAVA_HOME)/bin/jar
|
||||
JAVA=$(JAVA_HOME)/bin/java
|
||||
JAVA_TARGET=8
|
||||
JAVAC_OPTIONS=--release $(JAVA_TARGET) -Xlint:-options
|
||||
|
||||
TEST_LIB_DIR=build/test/lib
|
||||
TEST_BIN_DIR=build/test/bin
|
||||
LOG_DIR=build/test/logs
|
||||
LOG_LEVEL=
|
||||
SKIP=
|
||||
TEST_FLAGS=-DlogDir=$(LOG_DIR) -DlogLevel=$(LOG_LEVEL) -Dskip='$(subst $(COMMA), ,$(SKIP))'
|
||||
|
||||
# always sort SOURCES so zInit is last.
|
||||
SOURCES := $(sort $(wildcard src/*.cpp))
|
||||
SOURCES := $(wildcard src/*.cpp)
|
||||
HEADERS := $(wildcard src/*.h)
|
||||
RESOURCES := $(wildcard src/res/*)
|
||||
JAVA_HELPER_CLASSES := $(wildcard src/helper/one/profiler/*.class)
|
||||
API_SOURCES := $(wildcard src/api/one/profiler/*.java)
|
||||
CONVERTER_SOURCES := $(shell find src/converter -name '*.java')
|
||||
TEST_SOURCES := $(shell find test -name '*.java')
|
||||
TESTS ?=
|
||||
CPP_TEST_SOURCES := test/native/testRunner.cpp $(shell find test/native -name '*Test.cpp')
|
||||
CPP_TEST_HEADER := test/native/testRunner.hpp
|
||||
CPP_TEST_INCLUDES := -Isrc -Itest/native
|
||||
|
||||
ifeq ($(JAVA_HOME),)
|
||||
JAVA_HOME:=$(shell java -cp . JavaHome)
|
||||
export JAVA_HOME:=$(shell java -cp . JavaHome)
|
||||
endif
|
||||
|
||||
OS:=$(shell uname -s)
|
||||
ifeq ($(OS),Darwin)
|
||||
CXXFLAGS += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -Wl,-rpath,@executable_path/../lib -Wl,-rpath,@executable_path/../lib/server
|
||||
ifeq ($(OS), Darwin)
|
||||
CXXFLAGS += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE
|
||||
INCLUDES += -I$(JAVA_HOME)/include/darwin
|
||||
SOEXT=dylib
|
||||
PACKAGE_EXT=zip
|
||||
OS_TAG=macos
|
||||
ifeq ($(FAT_BINARY),true)
|
||||
FAT_BINARY_FLAGS=-arch x86_64 -arch arm64 -mmacos-version-min=10.15
|
||||
CFLAGS += $(FAT_BINARY_FLAGS)
|
||||
CXXFLAGS += $(FAT_BINARY_FLAGS)
|
||||
PACKAGE_NAME=async-profiler-$(PROFILER_VERSION)-$(OS_TAG)
|
||||
MERGE=false
|
||||
endif
|
||||
else
|
||||
CXXFLAGS += -U_FORTIFY_SOURCE -Wl,-z,defs -Wl,--exclude-libs,ALL -static-libstdc++ -static-libgcc -fdata-sections -ffunction-sections -Wl,--gc-sections -ggdb
|
||||
ifeq ($(MERGE),true)
|
||||
CXXFLAGS += -fwhole-program
|
||||
endif
|
||||
LIBS += -lrt
|
||||
INCLUDES += -I$(JAVA_HOME)/include/linux
|
||||
SOEXT=so
|
||||
PACKAGE_EXT=tar.gz
|
||||
OS_TAG=linux
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH_TAG),)
|
||||
ARCH:=$(shell uname -m)
|
||||
ifeq ($(ARCH),x86_64)
|
||||
ARCH_TAG=x64
|
||||
else ifeq ($(ARCH),aarch64)
|
||||
ARCH_TAG=arm64
|
||||
else ifeq ($(ARCH),arm64)
|
||||
ARCH_TAG=arm64
|
||||
else ifeq ($(findstring arm,$(ARCH)),arm)
|
||||
ARCH_TAG=arm32
|
||||
else ifeq ($(ARCH),ppc64le)
|
||||
ARCH_TAG=ppc64le
|
||||
else ifeq ($(ARCH),riscv64)
|
||||
ARCH_TAG=riscv64
|
||||
else ifeq ($(ARCH),loongarch64)
|
||||
ARCH_TAG=loongarch64
|
||||
ifeq ($(findstring musl,$(shell ldd /bin/ls)),musl)
|
||||
OS_TAG=linux-musl
|
||||
else
|
||||
ARCH_TAG=x86
|
||||
OS_TAG=linux
|
||||
endif
|
||||
endif
|
||||
|
||||
STATIC_BINARY=$(findstring musl-gcc,$(CC))
|
||||
ifneq (,$(STATIC_BINARY))
|
||||
CFLAGS += -static -fdata-sections -ffunction-sections -Wl,--gc-sections
|
||||
endif
|
||||
|
||||
ifneq (,$(findstring $(ARCH_TAG),x86 x64 arm64))
|
||||
CXXFLAGS += -momit-leaf-frame-pointer
|
||||
endif
|
||||
|
||||
|
||||
.PHONY: all jar release build-test test clean coverage clean-coverage build-test-java build-test-cpp build-test-libs build-test-bins test-cpp test-java check-md format-md
|
||||
|
||||
all: build/bin build/lib build/$(LIB_PROFILER) build/$(ASPROF) jar build/$(JFRCONV) build/$(ASPROF_HEADER)
|
||||
|
||||
jar: build/jar build/$(API_JAR) build/$(CONVERTER_JAR)
|
||||
|
||||
release: $(PACKAGE_NAME).$(PACKAGE_EXT)
|
||||
|
||||
$(PACKAGE_NAME).tar.gz: $(PACKAGE_DIR)
|
||||
patchelf --remove-needed ld-linux-x86-64.so.2 --remove-needed ld-linux-aarch64.so.1 $(PACKAGE_DIR)/$(LIB_PROFILER)
|
||||
tar czf $@ -C $(PACKAGE_DIR)/.. $(PACKAGE_NAME)
|
||||
rm -r $(PACKAGE_DIR)
|
||||
|
||||
tar czf $(DEBUG_PACKAGE_NAME).tar.gz -C $(DEBUG_PACKAGE_DIR)/.. $(DEBUG_PACKAGE_NAME)
|
||||
rm -r $(DEBUG_PACKAGE_DIR)
|
||||
|
||||
$(PACKAGE_NAME).zip: $(PACKAGE_DIR)
|
||||
truncate -cs -`stat -f "%z" build/$(CONVERTER_JAR)` $(PACKAGE_DIR)/$(JFRCONV)
|
||||
ifneq ($(GITHUB_ACTIONS), true)
|
||||
codesign -s "Developer ID" -o runtime --timestamp -v $(PACKAGE_DIR)/$(ASPROF) $(PACKAGE_DIR)/$(JFRCONV) $(PACKAGE_DIR)/$(LIB_PROFILER)
|
||||
endif
|
||||
cat build/$(CONVERTER_JAR) >> $(PACKAGE_DIR)/$(JFRCONV)
|
||||
ditto -c -k --keepParent $(PACKAGE_DIR) $@
|
||||
rm -r $(PACKAGE_DIR)
|
||||
|
||||
$(PACKAGE_DIR): all LICENSE README.md
|
||||
rm -rf $@
|
||||
mkdir -p $(PACKAGE_DIR) $(DEBUG_PACKAGE_DIR)
|
||||
cp -RP build/bin build/lib build/include LICENSE README.md $(PACKAGE_DIR)/
|
||||
chmod -R 755 $(PACKAGE_DIR)
|
||||
chmod 644 $(PACKAGE_DIR)/lib/* $(PACKAGE_DIR)/include/* $(PACKAGE_DIR)/LICENSE $(PACKAGE_DIR)/README.md
|
||||
|
||||
ifeq ($(OS_TAG),linux)
|
||||
$(STRIP) --only-keep-debug build/$(LIB_PROFILER) -o $(DEBUG_PACKAGE_DIR)/$(LIB_PROFILER_DEBUG)
|
||||
$(STRIP) -g $@/$(LIB_PROFILER)
|
||||
$(OBJCOPY) --add-gnu-debuglink=$(DEBUG_PACKAGE_DIR)/$(LIB_PROFILER_DEBUG) $@/$(LIB_PROFILER)
|
||||
chmod 644 $(DEBUG_PACKAGE_DIR)/*
|
||||
endif
|
||||
|
||||
build/%:
|
||||
mkdir -p $@
|
||||
|
||||
build/$(ASPROF): src/main/* src/jattach/* src/fdtransfer.h
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) $(DEFS) -o $@ src/main/*.cpp src/jattach/*.c
|
||||
$(STRIP) $@
|
||||
|
||||
build/$(JFRCONV): src/launcher/* build/$(CONVERTER_JAR)
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) $(DEFS) -o $@ src/launcher/*.cpp
|
||||
$(STRIP) $@
|
||||
cat build/$(CONVERTER_JAR) >> $@
|
||||
|
||||
build/$(LIB_PROFILER): $(SOURCES) $(HEADERS) $(RESOURCES) $(JAVA_HELPER_CLASSES)
|
||||
ifeq ($(MERGE),true)
|
||||
for f in src/*.cpp; do echo '#include "'$$f'"'; done |\
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) -fPIC -shared -o $@ -xc++ - $(LIBS)
|
||||
ARCH:=$(shell uname -m)
|
||||
ifeq ($(ARCH),x86_64)
|
||||
ARCH_TAG=x64
|
||||
else
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) -fPIC -shared -o $@ $(SOURCES) $(LIBS)
|
||||
ifeq ($(findstring arm,$(ARCH)),arm)
|
||||
ARCH_TAG=arm
|
||||
else
|
||||
ifeq ($(findstring aarch64,$(ARCH)),aarch64)
|
||||
ARCH_TAG=aarch64
|
||||
else
|
||||
ARCH_TAG=x86
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
build/$(ASPROF_HEADER): src/asprof.h
|
||||
mkdir -p build/include
|
||||
cp -f $< build/include
|
||||
|
||||
.PHONY: all release test clean
|
||||
|
||||
all: build build/$(LIB_PROFILER) build/$(JATTACH) build/$(API_JAR) build/$(CONVERTER_JAR)
|
||||
|
||||
release: build $(PACKAGE_NAME).tar.gz
|
||||
|
||||
$(PACKAGE_NAME).tar.gz: build/$(LIB_PROFILER) build/$(JATTACH) \
|
||||
build/$(API_JAR) build/$(CONVERTER_JAR) \
|
||||
profiler.sh LICENSE *.md
|
||||
mkdir -p $(PACKAGE_DIR)
|
||||
cp -RP build profiler.sh LICENSE *.md $(PACKAGE_DIR)
|
||||
chmod -R 755 $(PACKAGE_DIR)
|
||||
chmod 644 $(PACKAGE_DIR)/LICENSE $(PACKAGE_DIR)/*.md $(PACKAGE_DIR)/build/*.jar
|
||||
tar cvzf $@ -C $(PACKAGE_DIR)/.. $(PACKAGE_NAME)
|
||||
rm -r $(PACKAGE_DIR)
|
||||
|
||||
%.$(SOEXT): %.so
|
||||
-ln -s $(<F) $@
|
||||
|
||||
build:
|
||||
mkdir -p build
|
||||
|
||||
build/$(LIB_PROFILER_SO): $(SOURCES) $(HEADERS)
|
||||
$(CXX) $(CXXFLAGS) -DPROFILER_VERSION=\"$(PROFILER_VERSION)\" $(INCLUDES) -fPIC -shared -o $@ $(SOURCES) $(LIBS)
|
||||
|
||||
build/$(JATTACH): src/jattach/jattach.c
|
||||
$(CC) $(CFLAGS) -DJATTACH_VERSION=\"$(JATTACH_VERSION)\" -o $@ $^
|
||||
|
||||
build/$(API_JAR): $(API_SOURCES)
|
||||
mkdir -p build/api
|
||||
$(JAVAC) $(JAVAC_OPTIONS) -d build/api $(API_SOURCES)
|
||||
$(JAR) cf $@ -C build/api .
|
||||
$(JAVAC) -source $(JAVAC_RELEASE_VERSION) -target $(JAVAC_RELEASE_VERSION) -d build/api $^
|
||||
$(JAR) cvf $@ -C build/api .
|
||||
$(RM) -r build/api
|
||||
|
||||
build/$(CONVERTER_JAR): $(CONVERTER_SOURCES) $(RESOURCES)
|
||||
build/$(CONVERTER_JAR): $(CONVERTER_SOURCES) src/converter/MANIFEST.MF
|
||||
mkdir -p build/converter
|
||||
$(JAVAC) $(JAVAC_OPTIONS) -d build/converter $(CONVERTER_SOURCES)
|
||||
$(JAR) cfe $@ Main -C build/converter . -C src/res .
|
||||
$(JAVAC) -source 7 -target 7 -d build/converter $(CONVERTER_SOURCES)
|
||||
$(JAR) cvfm $@ src/converter/MANIFEST.MF -C build/converter .
|
||||
$(RM) -r build/converter
|
||||
|
||||
%.class: %.java
|
||||
$(JAVAC) -source 7 -target 7 -Xlint:-options -g:none $^
|
||||
|
||||
build/test/cpptests: $(CPP_TEST_SOURCES) $(CPP_TEST_HEADER) $(SOURCES) $(HEADERS) $(RESOURCES) $(JAVA_HELPER_CLASSES)
|
||||
mkdir -p build/test
|
||||
ifeq ($(MERGE),true)
|
||||
for f in src/*.cpp test/native/*.cpp; do echo '#include "'$$f'"'; done |\
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) $(CPP_TEST_INCLUDES) -fPIC -o $@ -xc++ - $(LIBS)
|
||||
else
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) $(CPP_TEST_INCLUDES) -fPIC -o $@ $(SOURCES) $(CPP_TEST_SOURCES) $(LIBS)
|
||||
endif
|
||||
|
||||
build-test-java: all build/$(TEST_JAR) build-test-libs build-test-bins
|
||||
|
||||
build-test-cpp: build/test/cpptests build-test-libs
|
||||
|
||||
build-test: build-test-cpp build-test-java
|
||||
|
||||
build-test-libs:
|
||||
@mkdir -p $(TEST_LIB_DIR)
|
||||
|
||||
ifeq ($(OS_TAG),linux)
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libreladyn.$(SOEXT) test/native/libs/reladyn.c
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libcallsmalloc.$(SOEXT) test/native/libs/callsmalloc.c
|
||||
$(CC) -shared -fPIC $(INCLUDES) -Isrc -o $(TEST_LIB_DIR)/libjnimalloc.$(SOEXT) test/native/libs/jnimalloc.c
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libmalloc.$(SOEXT) test/native/libs/malloc.c
|
||||
|
||||
$(CC) -c -shared -fPIC -o $(TEST_LIB_DIR)/vaddrdif.o test/native/libs/vaddrdif.c
|
||||
$(LD) -N -shared -o $(TEST_LIB_DIR)/libvaddrdif.$(SOEXT) $(TEST_LIB_DIR)/vaddrdif.o -T test/native/libs/vaddrdif.ld
|
||||
|
||||
$(AS) -o $(TEST_LIB_DIR)/multiplematching.o test/native/libs/multiplematching.s
|
||||
$(LD) -shared -o $(TEST_LIB_DIR)/multiplematching.$(SOEXT) $(TEST_LIB_DIR)/multiplematching.o
|
||||
|
||||
$(AS) -o $(TEST_LIB_DIR)/twiceatzero.o test/native/libs/twiceatzero.s
|
||||
$(LD) -shared -o $(TEST_LIB_DIR)/libtwiceatzero.$(SOEXT) $(TEST_LIB_DIR)/twiceatzero.o --section-start=.seg1=0x4000 -z max-page-size=0x1000
|
||||
endif
|
||||
|
||||
build-test-bins:
|
||||
@mkdir -p $(TEST_BIN_DIR)
|
||||
$(CC) -o $(TEST_BIN_DIR)/malloc_plt_dyn test/test/nativemem/malloc_plt_dyn.c
|
||||
$(CC) -o $(TEST_BIN_DIR)/native_api -Isrc test/test/c/native_api.c -ldl
|
||||
$(CC) -o $(TEST_BIN_DIR)/profile_with_dlopen -Isrc test/test/nativemem/profile_with_dlopen.c -ldl
|
||||
$(CC) -o $(TEST_BIN_DIR)/preload_malloc -Isrc test/test/nativemem/preload_malloc.c -ldl
|
||||
$(CXX) -o $(TEST_BIN_DIR)/non_java_app $(INCLUDES) $(CPP_TEST_INCLUDES) test/test/nonjava/non_java_app.cpp $(LIBS)
|
||||
|
||||
test-cpp: build-test-cpp
|
||||
echo "Running cpp tests..."
|
||||
LD_LIBRARY_PATH="$(TEST_LIB_DIR)" build/test/cpptests
|
||||
|
||||
test-java: build-test-java
|
||||
echo "Running tests against $(LIB_PROFILER)"
|
||||
$(JAVA) "-Djava.library.path=$(TEST_LIB_DIR)" $(TEST_FLAGS) -ea -cp "build/test.jar:build/jar/*:build/lib/*" one.profiler.test.Runner $(subst $(COMMA), ,$(TESTS))
|
||||
|
||||
coverage: override FAT_BINARY=false
|
||||
coverage: clean-coverage
|
||||
$(MAKE) test-cpp CXXFLAGS_EXTRA="-fprofile-arcs -ftest-coverage -fPIC -O0 --coverage"
|
||||
mkdir -p build/test/coverage
|
||||
cd build/test/ && gcovr -r ../.. --html-details --gcov-executable "$(GCOV)" -o coverage/index.html
|
||||
rm -rf -- -.gc*
|
||||
|
||||
test: test-cpp test-java
|
||||
|
||||
build/$(TEST_JAR): $(TEST_SOURCES) build/$(CONVERTER_JAR)
|
||||
mkdir -p build/test
|
||||
$(JAVAC) -source $(JAVA_TARGET) -target $(JAVA_TARGET) -Xlint:-options -cp "build/jar/*:build/converter/*" -d build/test $(TEST_SOURCES)
|
||||
$(JAR) cf $@ -C build/test .
|
||||
|
||||
check-md:
|
||||
prettier -c README.md "docs/**/*.md"
|
||||
|
||||
format-md:
|
||||
prettier -w README.md "docs/**/*.md"
|
||||
|
||||
clean-coverage:
|
||||
$(RM) -rf build/test/cpptests build/test/coverage
|
||||
test: all
|
||||
test/smoke-test.sh
|
||||
test/thread-smoke-test.sh
|
||||
test/alloc-smoke-test.sh
|
||||
test/load-library-test.sh
|
||||
echo "All tests passed"
|
||||
|
||||
clean:
|
||||
$(RM) -r build
|
||||
|
||||
136
README.md
@@ -1,117 +1,53 @@
|
||||
# Async-profiler
|
||||
# async-profiler
|
||||
|
||||
[](https://travis-ci.org/jvm-profiling-tools/async-profiler)
|
||||
|
||||
This project is a low overhead sampling profiler for Java
|
||||
that does not suffer from the [Safepoint bias problem](http://psy-lob-saw.blogspot.ru/2016/02/why-most-sampling-java-profilers-are.html).
|
||||
It features HotSpot-specific API to collect stack traces
|
||||
that does not suffer from [Safepoint bias problem](http://psy-lob-saw.blogspot.ru/2016/02/why-most-sampling-java-profilers-are.html).
|
||||
It features HotSpot-specific APIs to collect stack traces
|
||||
and to track memory allocations. The profiler works with
|
||||
OpenJDK and other Java runtimes based on the HotSpot JVM.
|
||||
OpenJDK, Oracle JDK and other Java runtimes based on the HotSpot JVM.
|
||||
|
||||
Unlike traditional Java profilers, async-profiler monitors non-Java threads
|
||||
(e.g., GC and JIT compiler threads) and shows native and kernel frames in stack traces.
|
||||
async-profiler can trace the following kinds of events:
|
||||
- CPU cycles
|
||||
- Hardware and Software performance counters like cache misses, branch misses, page faults, context switches etc.
|
||||
- Allocations in Java Heap
|
||||
- Contented lock attempts, including both Java object monitors and ReentrantLocks
|
||||
|
||||
## Usage
|
||||
|
||||
What can be profiled:
|
||||
See our [Wiki](https://github.com/jvm-profiling-tools/async-profiler/wiki) or [3 hours playlist](https://www.youtube.com/playlist?list=PLNCLTEx3B8h4Yo_WvKWdLvI9mj1XpTKBr) to learn about all set of features.
|
||||
|
||||
- CPU time
|
||||
- Allocations in Java Heap
|
||||
- Native memory allocations and leaks
|
||||
- Contended locks
|
||||
- Hardware and software performance counters like cache misses, page faults, context switches
|
||||
- and [more](docs/ProfilingModes.md).
|
||||
## Download
|
||||
|
||||
See our [3 hours playlist](https://www.youtube.com/playlist?list=PLNCLTEx3B8h4Yo_WvKWdLvI9mj1XpTKBr)
|
||||
to learn about more features.
|
||||
Latest release (1.8.3):
|
||||
|
||||
# Download
|
||||
- Linux x64 (glibc): [async-profiler-1.8.3-linux-x64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-linux-x64.tar.gz)
|
||||
- Linux x86 (glibc): [async-profiler-1.8.3-linux-x86.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-linux-x86.tar.gz)
|
||||
- Linux x64 (musl): [async-profiler-1.8.3-linux-musl-x64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-linux-musl-x64.tar.gz)
|
||||
- Linux ARM: [async-profiler-1.8.3-linux-arm.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-linux-arm.tar.gz)
|
||||
- Linux AArch64: [async-profiler-1.8.3-linux-aarch64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-linux-aarch64.tar.gz)
|
||||
- macOS x64: [async-profiler-1.8.3-macos-x64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v1.8.3/async-profiler-1.8.3-macos-x64.tar.gz)
|
||||
|
||||
### Stable release: [4.0](https://github.com/async-profiler/async-profiler/releases/tag/v4.0)
|
||||
[Early access](https://github.com/jvm-profiling-tools/async-profiler/releases/tag/v2.0-b1) (2.0-b1):
|
||||
|
||||
- Linux x64: [async-profiler-4.0-linux-x64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v4.0/async-profiler-4.0-linux-x64.tar.gz)
|
||||
- Linux arm64: [async-profiler-4.0-linux-arm64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v4.0/async-profiler-4.0-linux-arm64.tar.gz)
|
||||
- macOS arm64/x64: [async-profiler-4.0-macos.zip](https://github.com/async-profiler/async-profiler/releases/download/v4.0/async-profiler-4.0-macos.zip)
|
||||
- Profile converters: [jfr-converter.jar](https://github.com/async-profiler/async-profiler/releases/download/v4.0/jfr-converter.jar)
|
||||
- Linux x64 (glibc): [async-profiler-2.0-b1-linux-x64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v2.0-b1/async-profiler-2.0-b1-linux-x64.tar.gz)
|
||||
- macOS x64: [async-profiler-2.0-b1-macos-x64.tar.gz](https://github.com/jvm-profiling-tools/async-profiler/releases/download/v2.0-b1/async-profiler-2.0-b1-macos-x64.tar.gz)
|
||||
|
||||
### Nightly builds
|
||||
[Previous releases](https://github.com/jvm-profiling-tools/async-profiler/releases)
|
||||
|
||||
[The most recent binaries](https://github.com/async-profiler/async-profiler/releases/tag/nightly) corresponding
|
||||
to the latest successful commit in `master`.
|
||||
## Supported platforms
|
||||
|
||||
For a build corresponding to one of the previous commits, go to
|
||||
[Nightly Builds](https://github.com/async-profiler/async-profiler/actions/workflows/test-and-publish-nightly.yml),
|
||||
click the desired build and scroll down to the artifacts section. These binaries are kept for 30 days.
|
||||
- **Linux** / x64 / x86 / ARM / AArch64
|
||||
- **macOS** / x64
|
||||
|
||||
# Quick start
|
||||
Note: macOS profiling is limited to user space code only.
|
||||
|
||||
In a typical use case, profiling a Java application is just a matter of a running `asprof` with a PID of a
|
||||
running Java process.
|
||||
## Building
|
||||
|
||||
```
|
||||
$ asprof -d 30 -f flamegraph.html <PID>
|
||||
```
|
||||
Make sure the `JAVA_HOME` environment variable points to your JDK installation,
|
||||
and then run `make`. GCC is required. After building, the profiler agent binary
|
||||
will be in the `build` subdirectory. Additionally, a small application `jattach`
|
||||
that can load the agent into the target process will also be compiled to the
|
||||
`build` subdirectory.
|
||||
|
||||
The above command translates to: run profiler for 30 seconds and save results to `flamegraph.html`
|
||||
as an interactive [Flame Graph](docs/FlamegraphInterpretation.md) that can be viewed in a browser.
|
||||
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
Find more details in the [Getting started guide](docs/GettingStarted.md).
|
||||
|
||||
# Building
|
||||
|
||||
### Build status
|
||||
|
||||
[](https://github.com/async-profiler/async-profiler/actions/workflows/test-and-publish-nightly.yml)
|
||||
|
||||
### Minimum requirements
|
||||
|
||||
- make
|
||||
- GCC 7.5.0+ or Clang 7.0.0+
|
||||
- Static version of libstdc++ (e.g. on Amazon Linux 2023: `yum install libstdc++-static`)
|
||||
- JDK 11+
|
||||
|
||||
### How to build
|
||||
|
||||
Make sure `gcc`, `g++` and `java` are available on the `PATH`.
|
||||
Navigate to the root directory with async-profiler sources and run `make`.
|
||||
async-profiler launcher will be available at `build/bin/asprof`.
|
||||
|
||||
Other Makefile targets:
|
||||
|
||||
- `make test` - run unit and integration tests;
|
||||
- `make release` - package async-profiler binaries as `.tar.gz` (Linux) or `.zip` (macOS).
|
||||
|
||||
### Supported platforms
|
||||
|
||||
| | Officially maintained builds | Other available ports |
|
||||
| --------- | ---------------------------- | ----------------------------------------- |
|
||||
| **Linux** | x64, arm64 | x86, arm32, ppc64le, riscv64, loongarch64 |
|
||||
| **macOS** | x64, arm64 | |
|
||||
|
||||
# Documentation
|
||||
|
||||
## Basic usage
|
||||
|
||||
- [Getting Started](docs/GettingStarted.md)
|
||||
- [Profiler Options](docs/ProfilerOptions.md)
|
||||
- [Profiling Modes](docs/ProfilingModes.md)
|
||||
- [Integrating async-profiler](docs/IntegratingAsyncProfiler.md)
|
||||
- [Profiling In Container](docs/ProfilingInContainer.md)
|
||||
|
||||
## Profiler output
|
||||
|
||||
- [Output Formats](docs/OutputFormats.md)
|
||||
- [FlameGraph Interpretation](docs/FlamegraphInterpretation.md)
|
||||
- [JFR Visualization](docs/JfrVisualization.md)
|
||||
- [Converter Usage](docs/ConverterUsage.md)
|
||||
- [Heatmap](docs/Heatmap.md)
|
||||
|
||||
## Advanced usage
|
||||
|
||||
- [CPU Sampling Engines](docs/CpuSamplingEngines.md)
|
||||
- [Stack Walking Modes](docs/StackWalkingModes.md)
|
||||
- [Advanced Stacktrace Features](docs/AdvancedStacktraceFeatures.md)
|
||||
- [Profiling Non-Java Applications](docs/ProfilingNonJavaApplications.md)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
For known issues faced while running async-profiler and their detailed troubleshooting,
|
||||
please refer [here](docs/Troubleshooting.md).
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
## Reporting Security Issues
|
||||
|
||||
We take all security reports seriously.
|
||||
When we receive such reports,
|
||||
we will investigate and subsequently address
|
||||
any potential vulnerabilities as quickly as possible.
|
||||
If you discover a potential security issue in this project,
|
||||
please notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/).
|
||||
Please do *not* create a public GitHub issue in this project.
|
||||
2247
demo/SwingSet2.svg
Normal file
|
After Width: | Height: | Size: 141 KiB |
@@ -1,3 +0,0 @@
|
||||
FROM public.ecr.aws/docker/library/amazoncorretto:11-alpine-jdk
|
||||
|
||||
RUN apk add --no-cache make gcc g++ linux-headers musl-dev util-linux patchelf gcovr bash tar
|
||||
@@ -1,37 +0,0 @@
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2
|
||||
|
||||
RUN amazon-linux-extras enable python3.8
|
||||
|
||||
RUN yum update -y && yum install -y git make python38 gcc10 gcc10-c++ binutils tar
|
||||
|
||||
ARG node_version=20.19.1
|
||||
ARG node_sha256=babcd5b9e3216510b89305e6774bcdb2905ca98ff60028b67f163eb8296b6665
|
||||
RUN curl -L --output node.tar.gz https://github.com/nodejs/node/archive/refs/tags/v${node_version}.tar.gz
|
||||
RUN echo ${node_sha256} node.tar.gz | sha256sum -c
|
||||
RUN mkdir /node
|
||||
RUN tar xf node.tar.gz -C /node --strip-components=1
|
||||
WORKDIR /node
|
||||
|
||||
ENV CC=gcc10-cc
|
||||
ENV CXX=gcc10-c++
|
||||
RUN ./configure
|
||||
RUN make -j4 -s > /dev/null
|
||||
RUN make install
|
||||
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2
|
||||
|
||||
COPY --from=0 /usr/local/bin/node /usr/local/bin/node
|
||||
RUN amazon-linux-extras enable python3.8 && \
|
||||
yum update -y && \
|
||||
yum install -y gcc-c++ binutils make java-11-amazon-corretto patchelf tar python38 && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum && \
|
||||
python -m ensurepip && \
|
||||
python -m pip install gcovr
|
||||
|
||||
ENV NODE_JS_LOCATION=/__e/node20
|
||||
RUN cat <<EOF > /root/setup.sh
|
||||
#!/bin/sh
|
||||
mkdir -p "$NODE_JS_LOCATION/bin"
|
||||
ln --force --symbolic "/usr/local/bin/node" "$NODE_JS_LOCATION/bin/node"
|
||||
EOF
|
||||
@@ -1,8 +0,0 @@
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2023
|
||||
|
||||
RUN yum update -y && \
|
||||
yum install -y binutils findutils make tar gcc-c++ util-linux && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum && \
|
||||
python3 -m ensurepip && \
|
||||
python3 -m pip install gcovr
|
||||
@@ -1,32 +0,0 @@
|
||||
# Image for building async-profiler release packages
|
||||
|
||||
# Stage 0: download and build musl
|
||||
FROM public.ecr.aws/debian/debian:10-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
sudo libicu-dev patchelf curl make g++ openjdk-11-jdk-headless gcovr && \
|
||||
rm -rf /var/cache/apt /var/lib/apt/lists/*
|
||||
|
||||
ARG musl_src=musl-1.2.5
|
||||
ARG musl_sha256=a9a118bbe84d8764da0ea0d28b3ab3fae8477fc7e4085d90102b8596fc7c75e4
|
||||
|
||||
ADD https://musl.libc.org/releases/${musl_src}.tar.gz /
|
||||
RUN echo ${musl_sha256} ${musl_src}.tar.gz | sha256sum -c
|
||||
|
||||
RUN ["/bin/bash", "-c", "\
|
||||
tar xfz ${musl_src}.tar.gz && \
|
||||
cd /${musl_src} && \
|
||||
./configure --disable-shared --prefix=/usr/local/musl && \
|
||||
make -j`nproc` && make install && make clean && \
|
||||
ln -s /usr/include/$(arch)-linux-gnu/asm /usr/include/{asm-generic,linux} /usr/local/musl/include/"]
|
||||
|
||||
# Stage 1: install build tools + copy musl toolchain from the previous step
|
||||
FROM public.ecr.aws/debian/debian:10-slim
|
||||
|
||||
# The following command should be exactly the same as at stage 0 to benefit from caching.
|
||||
# libicu-dev is needed for the github actions runner
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
sudo libicu-dev patchelf curl make g++ openjdk-11-jdk-headless gcovr && \
|
||||
rm -rf /var/cache/apt /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=0 /usr/local/musl /usr/local/musl
|
||||
@@ -1,35 +0,0 @@
|
||||
# Advanced Stacktrace Features
|
||||
|
||||
## Display JIT compilation task
|
||||
|
||||
Async-profiler samples JIT compiler threads just the same way as Java threads, and hence can show
|
||||
CPU percentage spent on JIT compilation. At the same time, Java methods are different:
|
||||
some take more resources to compile, other take less. Furthermore, there are cases when
|
||||
a bug in C2 compiler causes a JIT thread to stuck in an infinite loop consuming 100% CPU.
|
||||
Async-profiler can highlight which particular Java methods take most CPU time to compile.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F comptask` (or its agent equivalent `features=comptask`).
|
||||
|
||||
## Display actual implementation in vtable
|
||||
|
||||
In some applications, a significant amount of CPU time is spent on dispatching megamorphic virtual/interface calls.
|
||||
async-profiler shows a pseudo-frame on top of v/itable stub with the actual type of object the virtual method is
|
||||
called on. This should make clear the proportion of different receivers for the particular call site.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F vtable` (or its agent equivalent `features=vtable`).
|
||||
|
||||
## Display instruction addresses
|
||||
|
||||
Sometimes, for low-level performance analysis, it is important to know where exactly
|
||||
CPU time is spent inside a method. As an intermediate step to the instruction-level
|
||||
profiling, async-profiler provides an option to record PC address of the currently
|
||||
running method for each execution sample. In this case, each stack trace will include
|
||||
a synthetic frame with the address at the top of every stack trace.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F pcaddr` (or its agent equivalent `features=pcaddr`).
|
||||
@@ -1,138 +0,0 @@
|
||||
# Converter Usage
|
||||
|
||||
async-profiler provides `jfrconv` utility to convert between different profile output formats.
|
||||
`jfrconv` can be found at the same location as the `asprof` binary. Converter is also available
|
||||
as a standalone Java application: [`jfr-converter.jar`](https://github.com/async-profiler/async-profiler/releases/download/v4.0/jfr-converter.jar).
|
||||
|
||||
## Supported conversions
|
||||
|
||||
| Source | html | collapsed | pprof | pb.gz | heatmap |
|
||||
| --------- | ---- | --------- | ----- | ----- | ------- |
|
||||
| jfr | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| html | ✅ | ✅ | ❌ | ❌ | ❌ |
|
||||
| collapsed | ✅ | ✅ | ❌ | ❌ | ❌ |
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
jfrconv [options] <input> [<input>...] <output>
|
||||
```
|
||||
|
||||
The output format specified can be only one at a time for conversion from one format to another.
|
||||
|
||||
```
|
||||
Conversion options:
|
||||
-o --output FORMAT, -o can be omitted if the output file extension unambiguously determines the format, e.g. profile.collapsed
|
||||
|
||||
FORMAT can be any of the following:
|
||||
# collapsed: This is a collection of call stacks, where each line is a semicolon separated
|
||||
list of frames followed by a counter. This is used by the FlameGraph script to
|
||||
generate the FlameGraph visualization of the profile data.
|
||||
|
||||
# html: FlameGraph is a hierarchical representation of call traces of the profiled
|
||||
software in a color coded format that helps to identify a particular resource
|
||||
usage like CPU and memory for the application.
|
||||
|
||||
# pprof: pprof is a profiling visualization and analysis tool from Google. More details on
|
||||
pprof on the official github page https://github.com/google/pprof.
|
||||
|
||||
# pb.gz: This is a compressed version of pprof output.
|
||||
|
||||
# heatmap: A single page interactive heatmap that allows to explore profiling events
|
||||
on a timeline.
|
||||
|
||||
|
||||
JFR options:
|
||||
--cpu Generate only CPU profile during conversion
|
||||
--wall Generate only Wall clock profile during conversion
|
||||
--alloc Generate only Allocation profile during conversion
|
||||
--live Build allocation profile from live objects only during conversion
|
||||
--nativemem Generate native memory allocation profile
|
||||
--leak Only include memory leaks in nativemem
|
||||
--lock Generate only Lock contention profile during conversion
|
||||
-t --threads Split stack traces by threads
|
||||
-s --state LIST Filter thread states: runnable, sleeping, default. State name is case insensitive
|
||||
and can be abbreviated, e.g. -s r
|
||||
--classify Classify samples into predefined categories
|
||||
--total Accumulate total value (time, bytes, etc.) instead of samples
|
||||
--lines Show line numbers
|
||||
--bci Show bytecode indices
|
||||
--simple Simple class names instead of fully qualified names
|
||||
--norm Normalize names of hidden classes/lambdas, e.g. Original JFR transforms
|
||||
lambda names to something like pkg.ClassName$$Lambda+0x00007f8177090218/543846639
|
||||
which gets normalized to pkg.ClassName$$Lambda
|
||||
--dot Dotted class names, e.g. java.lang.String instead of java/lang/String
|
||||
--from TIME Start time in ms (absolute or relative)
|
||||
--to TIME End time in ms (absolute or relative)
|
||||
TIME can be:
|
||||
# an absolute timestamp specified in millis since epoch;
|
||||
# an absolute time in hh:mm:ss or yyyy-MM-dd'T'hh:mm:ss format;
|
||||
# a relative time from the beginning of recording;
|
||||
# a relative time from the end of recording (a negative number).
|
||||
|
||||
Flame Graph options:
|
||||
--title STRING Convert to Flame Graph with provided title
|
||||
--minwidth X Skip frames smaller than X%
|
||||
--grain X Coarsen Flame Graph to the given grain size
|
||||
--skip N Skip N bottom frames
|
||||
-r --reverse Reverse stack traces (defaults to icicle graph)
|
||||
-i --inverted Toggles the layout for reversed stacktraces from icicle to flamegraph
|
||||
and for default stacktraces from flamegraph to icicle
|
||||
-I --include REGEX Include only stacks with the specified frames, e.g. -I 'MyApplication\.main' -I 'VMThread.*'
|
||||
-X --exclude REGEX Exclude stacks with the specified frames, e.g. -X '.*pthread_cond_(wait|timedwait).*'
|
||||
--highlight REGEX Highlight frames matching the given pattern
|
||||
```
|
||||
|
||||
See the [profiler options documentation](ProfilerOptions.md#options-applicable-to-flamegraph-and-tree-view-outputs-only) for details on the `--reverse` and `--inverted` options.
|
||||
|
||||
## jfrconv examples
|
||||
|
||||
`jfrconv` utility is provided in `bin` directory of the async-profiler package.
|
||||
It requires JRE to be installed on the system.
|
||||
|
||||
### Generate Flame Graph from JFR
|
||||
|
||||
If no output file is specified, it defaults to a Flame Graph output.
|
||||
|
||||
```
|
||||
jfrconv foo.jfr
|
||||
```
|
||||
|
||||
Profiling in JFR mode allows multi-mode profiling. So the command above will generate a Flame Graph
|
||||
output, however, for a multi-mode profile output with both `cpu` and `wall-clock` events, the
|
||||
Flame Graph will have an aggregation of both in the view. Such a view wouldn't make much sense and
|
||||
hence it is advisable to use JFR conversion filter options like `--cpu` to filter out events
|
||||
during a conversion.
|
||||
|
||||
```
|
||||
jfrconv --cpu foo.jfr
|
||||
|
||||
# which is equivalent to:
|
||||
# jfrconv --cpu -o html foo.jfr foo.html
|
||||
```
|
||||
|
||||
for HTML output as HTML is the default format for conversion from JFR.
|
||||
|
||||
#### Flame Graph options
|
||||
|
||||
To add a custom title to the generated Flame Graph, use `--title`, which has the default value `Flame Graph`:
|
||||
|
||||
```
|
||||
jfrconv --cpu foo.jfr foo.html -r --title "Custom Title"
|
||||
```
|
||||
|
||||
### Other formats
|
||||
|
||||
`jfrconv` supports converting a JFR file to `collapsed`, `pprof`, `pb.gz` and `heatmap` formats as well.
|
||||
|
||||
## Standalone converter examples
|
||||
|
||||
Standalone converter jar is provided in
|
||||
[Download](https://github.com/async-profiler/async-profiler/?tab=readme-ov-file#Download).
|
||||
It accepts the same parameters as `jfrconv`.
|
||||
|
||||
Below is an example usage:
|
||||
|
||||
```
|
||||
java -jar jfr-converter.jar --cpu foo.jfr --reverse --title "Application CPU profile"
|
||||
```
|
||||
@@ -1,76 +0,0 @@
|
||||
# CPU Sampling Engines
|
||||
|
||||
Async-profiler has three options for CPU profiling: `-e cpu`, `-e itimer` and `-e ctimer`.
|
||||
|
||||
## cpu
|
||||
|
||||
`cpu` mode measures CPU time spent by the running threads. For example,
|
||||
if an application uses 2 cpu cores, each with 30% utilization, and the sampling interval is
|
||||
10ms, then the profiler will collect about `2 * 0.3 * 100 = 60` samples per second.
|
||||
In other words, 1 profiling sample means that one CPU core was actively running for N nanoseconds,
|
||||
where N is the profiling interval.
|
||||
|
||||
On Linux, `cpu` mode relies on [perf_events](https://man7.org/linux/man-pages/man2/perf_event_open.2.html).
|
||||
One `perf_event` descriptor is created for each running thread and configured to generate a signal
|
||||
every `N` nanoseconds of CPU time. This is the most accurate CPU sampler available in async-profiler
|
||||
and the only one that can obtain kernel stack traces. It, however, comes with certain restrictions.
|
||||
|
||||
Most importantly, OS configuration may limit access to `perf_events` API, e.g.,
|
||||
by `kernel.perf_event_paranoid` sysctl or by seccomp (which is often the case in a Docker container).
|
||||
If `perf_events` are available, but kernel symbols are hidden (e.g., by `kernel.kptr_resitrct` setting),
|
||||
async-profiler continues to use `perf_events`, emits a warning and does not show kernel stack traces.
|
||||
|
||||
Another important thing to consider is that `cpu` sampling engine allocates a descriptor per thread.
|
||||
This means, if an application has too many threads and OS limit for the maximum number of open descriptors
|
||||
(`ulimit -n`) is too low, an application may run out of file descriptors. The workaround
|
||||
is to simply increase file descriptor limit.
|
||||
|
||||
## itimer
|
||||
|
||||
`itimer` mode is based on [setitimer(ITIMER_PROF)](https://man7.org/linux/man-pages/man2/setitimer.2.html)
|
||||
syscall, which ideally generates a signal every given interval of CPU time consumed by the process.
|
||||
Ideally, both `itimer` and `cpu` should collect the same number of samples. Typically,
|
||||
profiles indeed look very similar. However, in [some cases](https://github.com/golang/go/issues/14434),
|
||||
`cpu` profile appears more accurate, since a signal is delivered exactly to the thread
|
||||
that overflowed a hardware counter. In contrast, `itimer` has the following limitations:
|
||||
|
||||
- Only one `itimer` signal can be delivered to a process at a time.
|
||||
- Signals are not distributed evenly between running threads.
|
||||
- Sampling resolution is limited by the size of [jiffies](https://man7.org/linux/man-pages/man7/time.7.html).
|
||||
|
||||
`itimer` profiles may be even less accurate on macOS, where `itimer` signals are often biased
|
||||
towards system calls.
|
||||
|
||||
The main advantage of `itimer` is that it works in containers and does not consume file descriptors.
|
||||
|
||||
## ctimer
|
||||
|
||||
`ctimer` is a Linux-specific alternative for `cpu` profiling mode to overcome limitations
|
||||
of `perf_events`, such as `perf_event_paraniod` setting, seccomp restriction or a low limit
|
||||
for the number of open file descriptors. `ctimer` mode relies on
|
||||
[timer_create](https://man7.org/linux/man-pages/man2/timer_create.2.html) API.
|
||||
It combines benefits of `-e cpu` and `-e itimer`, except that it does not allow collecting kernel stacks.
|
||||
|
||||
Like with `itimer`, `ctimer` resolution is limited by the size of the jiffy -
|
||||
kernel `HZ` constant, which is typically equal to 100 or 250, meaning that the minimum supported
|
||||
profiling interval is 10ms or 4ms respectively.
|
||||
|
||||
## Summary
|
||||
|
||||
Here is a summary of advantages and drawbacks of all CPU profiling engines:
|
||||
|
||||
| Attribute | cpu (perf_events) | itimer | ctimer |
|
||||
| --------------------------------- | :---------------: | :----: | :----: |
|
||||
| Can collect kernel stack traces | ✅ | ❌ | ❌ |
|
||||
| High resolution | ✅ | ❌ | ❌ |
|
||||
| Accuracy / fairness | ✅ | ❌ | 🆗 |
|
||||
| Works in containers by default | ❌ | ✅ | ✅ |
|
||||
| Does not consume file descriptors | ❌ | ✅ | ✅ |
|
||||
| macOS support | ❌ | ✅ | ❌ |
|
||||
|
||||
When using `-e cpu` on Linux, async-profiler automatically checks for `perf_events` availability
|
||||
by trying to create a dummy perf_event. If kernel-space profiling is not available,
|
||||
async-profiler transparently falls back to `ctimer` mode. To force using `perf_events`
|
||||
for user-space only profiling, specify `-e cpu-clock --all-user` instead of `-e cpu`.
|
||||
|
||||
The actual profiling engine (`perf_events`, `ctimer`, etc.) is now recorded in `jfr` output.
|
||||
@@ -1,85 +0,0 @@
|
||||
# FlameGraph interpretation
|
||||
|
||||
To interpret a flame graph, the best way forward is to understand how it is created.
|
||||
|
||||
## Example application to profile
|
||||
|
||||
Let's take the below example:
|
||||
|
||||
```
|
||||
main() {
|
||||
// some business logic
|
||||
func3() {
|
||||
// some business logic
|
||||
func7();
|
||||
}
|
||||
|
||||
// some business logic
|
||||
func4();
|
||||
|
||||
// some business logic
|
||||
func1() {
|
||||
// some business logic
|
||||
func5();
|
||||
}
|
||||
|
||||
// some business logic
|
||||
func2() {
|
||||
// some business logic
|
||||
func6() {
|
||||
// some business logic
|
||||
func8(); // cpu intensive work here
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Profiler sampling
|
||||
|
||||
Profiling starts by taking samples `X` times per second. Whenever a sample is taken,
|
||||
the current call stack for it is saved. The diagram below shows the unsorted sampling view
|
||||
before the sorting and aggregation takes place.
|
||||
|
||||

|
||||
|
||||
Below are the sampling numbers:
|
||||
|
||||
- `func3()->func7()`: 3 samples
|
||||
- `func4()`: 1 sample
|
||||
- `func1()->func5()`: 2 samples
|
||||
- `func2()->func8()`: 4 samples
|
||||
- `func2()->func6()`: 1 sample
|
||||
|
||||
## Sorting samples
|
||||
|
||||
Samples are then alphabetically sorted at the base level just after root (or main method) of the application.
|
||||
|
||||

|
||||
|
||||
Note that X-axis is no longer a timeline. Flame graph does not preserve information
|
||||
on _when_ a particular stack trace was taken, it only indicates _how often_
|
||||
a stack trace was observed during profiling.
|
||||
|
||||
## Aggregated view
|
||||
|
||||
The blocks for the same functions at each level of stack depth are then stitched together
|
||||
to get an aggregated view of the flame graph.
|
||||

|
||||
|
||||
In this example, except `func4()`, no other function actually consumes
|
||||
any resource at the base level of stack depth. `func5()`, `func6()`,
|
||||
`func7()` and `func8()` are the ones consuming resources, with `func8()`
|
||||
being a likely candidate for performance optimization.
|
||||
|
||||
CPU utilization is the most common use case for flame graphs, however,
|
||||
there are other modes of profiling like allocation profiling to view
|
||||
heap utilization and wall-clock profiling to view latency.
|
||||
|
||||
[More on various modes of profiling](ProfilingModes.md)
|
||||
|
||||
## Understanding FlameGraph colors
|
||||
|
||||
Color is another flame graph dimension that may be used to encode additional information
|
||||
about each frame. Colors may have different meaning in various flame graph implementations.
|
||||
async-profiler uses the following palette to differentiate frame types:
|
||||
|
||||

|
||||
@@ -1,106 +0,0 @@
|
||||
# Getting started guide
|
||||
|
||||
## Before profiling
|
||||
|
||||
As of Linux 4.6, capturing kernel call stacks using `perf_events` from a non-root
|
||||
process requires setting two kernel parameters. You can set them using sysctl as follows:
|
||||
|
||||
```
|
||||
# sysctl kernel.perf_event_paranoid=1
|
||||
# sysctl kernel.kptr_restrict=0
|
||||
```
|
||||
|
||||
## Find a process to profile
|
||||
|
||||
Common ways to find the target process include using
|
||||
[`jps`](https://docs.oracle.com/en/java/javase/21/docs/specs/man/jps.html) and
|
||||
[`pgrep`](https://man7.org/linux/man-pages/man1/pgrep.1.html).
|
||||
For example, to list all Java process IDs with their full command lines, run
|
||||
`pgrep -a java`. The next section includes an example using `jps`.
|
||||
|
||||
## Start profiling
|
||||
|
||||
async-profiler works in the context of the target Java application,
|
||||
i.e. it runs as an agent in the process being profiled.
|
||||
`asprof` is a tool to attach and control the agent.
|
||||
|
||||
A typical workflow would be to launch your Java application, attach
|
||||
the agent and start profiling, exercise your performance scenario, and
|
||||
then stop profiling. The agent's output, including the profiling results, will
|
||||
be displayed on the console where you've started `asprof`.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof start 8983
|
||||
$ asprof stop 8983
|
||||
```
|
||||
|
||||
The following may be used in lieu of the `pid` (8983):
|
||||
|
||||
- The keyword `jps`, which will find `pid` automatically, if there is a single Java process running in the system.
|
||||
- The application name as it appears in the `jps` output: e.g. `Computey`
|
||||
|
||||
Alternatively, you may specify `-d` (duration) argument to profile
|
||||
the application for a fixed period of time with a single command.
|
||||
|
||||
```
|
||||
$ asprof -d 30 8983
|
||||
```
|
||||
|
||||
By default, the profiling frequency is 100Hz (every 10ms of CPU time).
|
||||
Here is a sample output of `asprof`:
|
||||
|
||||
```
|
||||
--- Execution profile ---
|
||||
Total samples: 687
|
||||
Unknown (native): 1 (0.15%)
|
||||
|
||||
--- 6790000000 (98.84%) ns, 679 samples
|
||||
[ 0] Primes.isPrime
|
||||
[ 1] Primes.primesThread
|
||||
[ 2] Primes.access$000
|
||||
[ 3] Primes$1.run
|
||||
[ 4] java.lang.Thread.run
|
||||
|
||||
... a lot of output omitted for brevity ...
|
||||
|
||||
ns percent samples top
|
||||
---------- ------- ------- ---
|
||||
6790000000 98.84% 679 Primes.isPrime
|
||||
40000000 0.58% 4 __do_softirq
|
||||
|
||||
... more output omitted ...
|
||||
```
|
||||
|
||||
This indicates that the hottest method was `Primes.isPrime`, and the hottest
|
||||
call stack leading to it comes from `Primes.primesThread`.
|
||||
|
||||
## Other use cases
|
||||
|
||||
- [Launching as an agent](IntegratingAsyncProfiler.md#launching-as-an-agent)
|
||||
- [Java API](IntegratingAsyncProfiler.md#using-java-api)
|
||||
- [IntelliJ IDEA](IntegratingAsyncProfiler.md#intellij-idea)
|
||||
|
||||
## FlameGraph visualization
|
||||
|
||||
async-profiler provides out-of-the-box [Flame Graph](https://www.brendangregg.com/flamegraphs.html) support.
|
||||
Specify `-o flamegraph` argument to dump profiling results as an interactive HTML Flame Graph.
|
||||
Also, Flame Graph output format will be chosen automatically if the target filename ends with `.html`.
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof -d 30 -f /tmp/flamegraph.html 8983
|
||||
```
|
||||
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
The flame graph html can be opened in any browser of your choice for further interpretation.
|
||||
|
||||
Please refer to [Interpreting a Flame Graph](FlamegraphInterpretation.md)
|
||||
to understand more on how to interpret a Flame Graph.
|
||||
@@ -1,94 +0,0 @@
|
||||
# Heatmap
|
||||
|
||||
Problems to be solved with a profiler can be divided into two large categories:
|
||||
|
||||
1. Optimization of overall resource usage.
|
||||
2. Troubleshooting of intermittent performance issues.
|
||||
|
||||
While flame graphs are handy for the first type of problems, they are not very helpful
|
||||
for analyzing transient anomalies because they provide an aggregated view that lacks
|
||||
any timeline information. To address the second type of problems, async-profiler offers
|
||||
a converter from JFR format to an interactive heatmap in the form of a single-page HTML file.
|
||||
|
||||
Heatmap is an alternative representation of profile data that preserves timestamps
|
||||
of particular samples. Essentially, it's a two-dimensional timeline composed of
|
||||
colored blocks. Each block represents a short period of time (usually in the range of
|
||||
milliseconds to seconds) with its color being the third dimension: the more intense
|
||||
the color, the more events happened in a given period of time.
|
||||
|
||||

|
||||
|
||||
The idea of heatmaps was borrowed from [FlameScope](https://github.com/Netflix/flamescope),
|
||||
however, FlameScope targets short profiling intervals up to a few minutes, whereas
|
||||
async-profiler implementation is capable of visualizing 24-hour recordings
|
||||
with the granularity of 20 milliseconds. Moreover, heatmaps produced by async-profiler
|
||||
are serverless: they are standalone self-contained HTML files that can be easily shared
|
||||
and viewed without additional software besides a browser.
|
||||
|
||||
## Heatmap features
|
||||
|
||||
### Whole day profile
|
||||
|
||||
Heatmaps are optimized for information density. Full day of continuous profiling
|
||||
can be presented on a single image, where an engineer can spot regular activity
|
||||
patterns as well as anomalies at a glance.
|
||||
|
||||
Heatmaps are also optimized for footprint. Specialized compression algorithms
|
||||
can pack 1 GB original JFR recording to an HTML page of 10-15 MB in size.
|
||||
|
||||

|
||||
|
||||
### Scale / zoom
|
||||
|
||||
Depending on the recording duration and level of detail you are interested in,
|
||||
you can switch between 3 available scales. On the largest scale, each vertical line
|
||||
represents 5 minutes of wall clock time, with each square corresponding to
|
||||
a 5-second interval. On the finest scale, each square corresponds to 20 milliseconds,
|
||||
allowing you to analyze profiling samples with a high resolution.
|
||||
|
||||

|
||||
|
||||
### Instant flame graphs
|
||||
|
||||
A click on any heatmap square displays a flame graph for this specific time interval.
|
||||
|
||||

|
||||
|
||||
Hold mouse button to select an arbitrary time range on a heatmap.
|
||||
A flame graph for the given time range will be built automatically.
|
||||
|
||||

|
||||
|
||||
### Compare time ranges
|
||||
|
||||
Select target time range as described above. Holding `Ctrl` key,
|
||||
move mouse pointer to choose another time range that will serve as a baseline.
|
||||
You will then get a differential flame graph highlighting stacks
|
||||
that were seen more often in the target time range compared to the baseline.
|
||||
|
||||

|
||||
|
||||
### Search
|
||||
|
||||
Press `Ctrl+F` and enter a regex to search on the entire heatmap.
|
||||
Time intervals containing matched stacks will be highlighted on a heatmap in blue.
|
||||
Matching frames, if any, will be also highlighted on a flame graph.
|
||||
|
||||
`Ctrl+Shift+F` does the same, except that a flame graph will
|
||||
retain stacks with matching frames only. All other stacks will be filtered out.
|
||||
|
||||

|
||||
|
||||
## Producing heatmaps
|
||||
|
||||
Heatmaps can only be generated from recordings in JFR format.
|
||||
Run [`jfrconv`](ConverterUsage.md) tool with `-o heatmap` option.
|
||||
|
||||
Standard `jfrconv` options (`--cpu`, `--alloc`, `--from`/`--to`, `--simple`, etc.)
|
||||
are also applicable to heatmaps.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
jfrconv --cpu -o heatmap profiler.jfr heatmap-cpu.html
|
||||
```
|
||||
@@ -1,58 +0,0 @@
|
||||
# Integrating async-profiler
|
||||
|
||||
## Launching as an agent
|
||||
|
||||
If you need to profile some code as soon as the JVM starts up, instead of using `asprof`,
|
||||
it is possible to attach async-profiler as an agent on the command line. For example:
|
||||
|
||||
```
|
||||
$ java -agentpath:/path/to/libasyncProfiler.so=start,event=cpu,file=profile.html ...
|
||||
```
|
||||
|
||||
Agent library is configured through the JVMTI argument interface.
|
||||
The format of the arguments string is described
|
||||
[in the source code](https://github.com/async-profiler/async-profiler/blob/v4.0/src/arguments.cpp#L44).
|
||||
`asprof` actually converts command line arguments to that format.
|
||||
|
||||
Another important use of attaching async-profiler as an agent is for continuous profiling.
|
||||
|
||||
## Using Java API
|
||||
|
||||
async-profiler can be controlled programmatically using Java API. The corresponding Java library
|
||||
is published to Maven Central. You can [include it](https://mvnrepository.com/artifact/tools.profiler/async-profiler/latest)
|
||||
just like any other Maven dependency:
|
||||
|
||||
```
|
||||
<dependency>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>async-profiler</artifactId>
|
||||
<version>X.Y</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
### Example usage with the API
|
||||
|
||||
```
|
||||
AsyncProfiler profiler = AsyncProfiler.getInstance();
|
||||
```
|
||||
|
||||
The above gives us an instance of `AsyncProfiler` object which can be further used to start
|
||||
actual profiling.
|
||||
|
||||
```
|
||||
profiler.execute("start,jfr,event=cpu,file=/path/to/%p.jfr");
|
||||
// do some meaningful work
|
||||
profiler.execute("stop");
|
||||
```
|
||||
|
||||
`%p` equates to the PID of the process. Filename may include other placeholders which
|
||||
can be found in [Profiler Options](ProfilerOptions.md).
|
||||
`file` should be specified only once, either in
|
||||
`start` command with `jfr` output or in `stop` command with any other format.
|
||||
|
||||
## IntelliJ IDEA
|
||||
|
||||
IntelliJ IDEA comes bundled with async-profiler, which can be further configured to our needs
|
||||
by selecting the `Java Profiler` menu option at `Settings/Preferences > Build, Execution, Deployment`.
|
||||
Agent options can be modified for the specific use cases and also `Collect native calls` can be checked
|
||||
to monitor non-Java threads and native frames in Java stack traces.
|
||||
@@ -1,41 +0,0 @@
|
||||
# JFR Visualization
|
||||
|
||||
JFR recordings produced by async-profiler can be viewed using multiple options explained below.
|
||||
|
||||
## Built-in converter
|
||||
|
||||
async-profiler provides a built-in converter `jfrconv` which can be used to convert `jfr` output
|
||||
to a flame graph or one of the other supported formats. More details on the built-in converter usage
|
||||
can be found [here](ConverterUsage.md).
|
||||
|
||||
## JMC
|
||||
|
||||
[JDK Mission Control](https://www.oracle.com/java/technologies/jdk-mission-control.html) (JMC)
|
||||
is a popular GUI tool to analyze JFR recordings.
|
||||
It has been originally developed to work in conjunction with the JDK Flight Recorder,
|
||||
however, async-profiler recordings are also fully compatible with JMC.
|
||||
|
||||
When viewing async-profiler recordings in JMC, information on some tabs may be missing.
|
||||
Developers are typically interested in the following sections:
|
||||
|
||||
- Java Application
|
||||
- Method Profiling
|
||||
- Memory
|
||||
- Lock Instances
|
||||
- JVM Internals
|
||||
- TLAB Allocations
|
||||
|
||||
## IntelliJ IDEA
|
||||
|
||||
IntelliJ IDEA Ultimate has built-in JFR viewer that works perfectly with async-profiler recordings.
|
||||
For the Community Edition, there is an open-source profiler [plugin](https://plugins.jetbrains.com/plugin/20937-java-jfr-profiler)
|
||||
that allows you to profile Java applications with JFR and async-profiler as well as
|
||||
open JFR files obtained outside IDE.
|
||||
|
||||
## JFR command line tool
|
||||
|
||||
JDK distributions include the `jfr` command line utility to filter, summarize and output
|
||||
flight recording files into human-readable format. The
|
||||
[official documentation](https://docs.oracle.com/en/java/javase/21/docs/specs/man/jfr.html)
|
||||
provides complete information on how to manipulate the contents and translate it as per
|
||||
developers' needs to debug performance issues with their Java applications.
|
||||
@@ -1,60 +0,0 @@
|
||||
# Output Formats
|
||||
|
||||
async-profiler currently supports the following output formats:
|
||||
|
||||
- `collapsed` - This is a collection of call stacks, where each line is a semicolon separated list of frames followed
|
||||
by a counter. This is used by the FlameGraph script to generate the FlameGraph visualization of the profile data.
|
||||
|
||||
```
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult 21
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeInt 1
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeInt;java/io/ByteArrayOutputStream.write 5
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF 12
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF;java/lang/String.length 3
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.write 6
|
||||
start_thread;thread_native_entry;Thread::call_run;VMThread::run;VMThread::inner_execute;VMThread::evaluate_operation;VM_Operation::evaluate;VM_GenCollectForAllocation::doit;GenCollectedHeap::satisfy_failed_allocation;GenCollectedHeap::do_collection;GenCollectedHeap::collect_generation;DefNewGeneration::collect;DefNewGeneration::FastEvacuateFollowersClosure::do_void 12
|
||||
start_thread;thread_native_entry;Thread::call_run;VMThread::run;VMThread::inner_execute;VMThread::evaluate_operation;VM_Operation::evaluate;VM_GenCollectForAllocation::doit;GenCollectedHeap::satisfy_failed_allocation;GenCollectedHeap::do_collection;GenCollectedHeap::collect_generation;DefNewGeneration::collect;DefNewGeneration::FastEvacuateFollowersClosure::do_void;void ContiguousSpace::oop_since_save_marks_iterate<DefNewScanClosure> 1
|
||||
```
|
||||
|
||||
- `flamegraph` - FlameGraph is a hierarchical representation of call traces of the profiled software in a color coded
|
||||
format. Read more on the [interpretation](FlamegraphInterpretation.md) of FlameGraphs.
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
- `tree` - Profile output generated in HTML format showing a tree view of resource usage beginning with the call stack
|
||||
with the highest resource usage and then showing other call stacks in descending order of resource usage. Expanding a
|
||||
parent frame follows the same hierarchical representation within that frame.
|
||||

|
||||
|
||||
- `text` - If no output format is specified with `-o` and filename has no extension provided, profiled output is
|
||||
generated in text format.
|
||||
|
||||
```
|
||||
--- Execution profile ---
|
||||
Total samples : 733
|
||||
|
||||
--- 8208 bytes (19.58%), 1 sample
|
||||
[ 0] byte[]
|
||||
[ 1] java.util.jar.Manifest$FastInputStream.<init>
|
||||
[ 2] java.util.jar.Manifest$FastInputStream.<init>
|
||||
[ 3] java.util.jar.Manifest.read
|
||||
[ 4] java.util.jar.Manifest.<init>
|
||||
[ 5] java.util.jar.Manifest.<init>
|
||||
[ 6] java.util.jar.JarFile.getManifestFromReference
|
||||
[ 7] java.util.jar.JarFile.getManifest
|
||||
[ 8] jdk.internal.loader.URLClassPath$JarLoader$2.getManifest
|
||||
[ 9] jdk.internal.loader.BuiltinClassLoader.defineClass
|
||||
[10] jdk.internal.loader.BuiltinClassLoader.findClassOnClassPathOrNull
|
||||
[11] jdk.internal.loader.BuiltinClassLoader.loadClassOrNull
|
||||
[12] jdk.internal.loader.BuiltinClassLoader.loadClass
|
||||
[13] jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass
|
||||
[14] java.lang.ClassLoader.loadClass
|
||||
[15] java.lang.Class.forName0
|
||||
[16] java.lang.Class.forName
|
||||
[17] sun.launcher.LauncherHelper.loadMainClass
|
||||
[18] sun.launcher.LauncherHelper.checkAndLoadMain
|
||||
```
|
||||
|
||||
- `jfr` - profile format used by the JDK Flight Recorder. The `jfr` format collects data
|
||||
about the JVM as well as the Java application running on it. async-profiler can generate output in `jfr` format
|
||||
compatible with tools capable of viewing and analyzing `jfr` files. JDK Mission Control (JMC) and IntelliJ IDEA are
|
||||
some of many options to visualize `jfr` files. More details [here](JfrVisualization.md).
|
||||
@@ -1,118 +0,0 @@
|
||||
# Profiler options
|
||||
|
||||
The below tables list the profiler options available with `asprof` and also when
|
||||
[launching as an agent](IntegratingAsyncProfiler.md#launching-as-an-agent).
|
||||
Some tables are output specific, which means some options are applicable to only one or more output formats but not all.
|
||||
|
||||
```
|
||||
Usage: asprof [action] [options] [PID]
|
||||
```
|
||||
|
||||
## Actions
|
||||
|
||||
The below options are `action`s for async-profiler and common for both `asprof` binary and when launching as an agent.
|
||||
|
||||
| Option | Description |
|
||||
| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `start` | Start profiling in semi-automatic mode, i.e. profiler will run until `stop` command is explicitly called. |
|
||||
| `resume` | Start or resume earlier profiling session that has been stopped. All the collected data remains valid. The profiling options are not preserved between sessions, and should be specified again. |
|
||||
| `stop` | Stop profiling and print the report. |
|
||||
| `dump` | Dump collected data without stopping profiling session. |
|
||||
| `check` | Check if the specified profiling event is available. |
|
||||
| `status` | Print profiling status: whether profiler is active and for how long. |
|
||||
| `meminfo` | Print used memory statistics. |
|
||||
| `list` | Show the list of profiling events available for the target process specified with PID. |
|
||||
|
||||
## Options applicable to any output format
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| ------------------ | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-o fmt` | `fmt` | Specifies what information to dump when profiling ends. For various dump option details, please refer to [Dump Option Appendix](#dump-option). |
|
||||
| `-d N` | N/A | asprof-only option designed for interactive use. It is a shortcut for running 3 actions: start, sleep for N seconds, stop. If no `start`, `resume`, `stop` or `status` option is given, the profiler will run for the specified period of time and then automatically stop.<br>Example: `asprof -d 30 <pid>` |
|
||||
| `--timeout N` | `timeout=N` | The profiling duration, in seconds. The profiler will run for the specified period of time and then automatically stop.<br>Example: `java -agentpath:/path/to/libasyncProfiler.so=start,event=cpu,timeout=30,file=profile.html <application>` |
|
||||
| `-e --event EVENT` | `event=EVENT` | The profiling event: `cpu`, `alloc`, `nativemem`, `lock`, `cache-misses` etc. Use `list` to see the complete list of available events.<br>Please refer to [Profiling Modes](ProfilingModes.md) for additional information. |
|
||||
| `-i --interval N` | `interval=N` | Interval has different meaning depending on the event. For CPU profiling, it's CPU time in nanoseconds. In wall clock mode, it's wall clock time. For Java method profiling or native function profiling, it's number of calls. For PMU profiling, it's number of events. Time intervals may be followed by `s` for seconds, `ms` for milliseconds, `us` for microseconds or `ns` for nanoseconds.<br>Example: `asprof -e cpu -i 5ms 8983` |
|
||||
| `--alloc N` | `alloc=N` | Allocation profiling interval in bytes or in other units, if N is followed by `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). |
|
||||
| `--live` | `live` | Retain allocation samples with live objects only (object that have not been collected by the end of profiling session). Useful for finding Java heap memory leaks. |
|
||||
| `--nativemem N` | `nativemem=N` | Native memory allocation profiling. N, if specified is the interval in bytes or in other units, if N is followed by `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). Default N is 0. |
|
||||
| `--nofree` | `nofree` | Will not record free calls in native memory allocation profiling. This is relevant when tracking memory leaks is not important and there are lots of free calls. |
|
||||
| `--lock DURATION` | `lock=DURATION` | In lock profiling mode, sample contended locks when total lock duration overflows the threshold. |
|
||||
| `--wall INTERVAL` | `wall=INTERVAL` | Wall clock profiling interval. Use this option instead of `-e wall` to enable wall clock profiling with another event, typically `cpu`.<br>Example: `asprof -e cpu --wall 100ms -f combined.jfr 8983`. |
|
||||
| `-j N` | `jstackdepth=N` | Sets the maximum stack depth. The default is 2048.<br>Example: `asprof -j 30 8983` |
|
||||
| `-I PATTERN` | `include=PATTERN` | Filter stack traces by the given pattern(s). `-I` defines the name pattern that _must_ be present in the stack traces. `-I` can be specified multiple times. A pattern may begin or end with a star `*` that denotes any (possibly empty) sequence of characters.<br>Example: `asprof -I 'Primes.*' -I 'java/*' 8983` |
|
||||
| `-X PATTERN` | `exclude=PATTERN` | Filter stack traces by the given pattern(s). `-X` defines the name pattern that _must not_ occur in any of stack traces in the output. `-X` can be specified multiple times. A pattern may begin or end with a star `*` that denotes any (possibly empty) sequence of characters.<br>Example: `asprof -X '*Unsafe.park*' 8983` |
|
||||
| `-L level` | `loglevel=level` | Log level: `debug`, `info`, `warn`, `error` or `none`. |
|
||||
| `-F features` | `features=LIST` | Comma separated (or `+` separated when launching as an agent) list of stack walking features. Supported features are:<ul><li>`stats` - log stack walking performance stats.</li><li>`vtable` - display targets of megamorphic virtual calls as an extra frame on top of `vtable stub` or `itable stub`.</li><li>`comptask` - display current compilation task (a Java method being compiled) in a JIT compiler stack trace.</li><li>`pcaddr` - display instruction addresses.</li></ul>More details [here](AdvancedStacktraceFeatures.md). |
|
||||
| `-f FILENAME` | `file` | The file name to dump the profile information to.<br>`%p` in the file name is expanded to the PID of the target JVM;<br>`%t` - to the timestamp;<br>`%n{MAX}` - to the sequence number;<br>`%{ENV}` - to the value of the given environment variable.<br>Example: `asprof -o collapsed -f /tmp/traces-%t.txt 8983` |
|
||||
| `--loop TIME` | `loop=TIME` | Run profiler in a loop (continuous profiling). The argument is either a clock time (`hh:mm:ss`) or a loop duration in `s`econds, `m`inutes, `h`ours, or `d`ays. Make sure the filename includes a timestamp pattern, or the output will be overwritten on each iteration.<br>Example: `asprof --loop 1h -f /var/log/profile-%t.jfr 8983` |
|
||||
| `--all-user` | `alluser` | Include only user-mode events. This option is helpful when kernel profiling is restricted by `perf_event_paranoid` settings. |
|
||||
| `--sched` | `sched` | Group threads by Linux-specific scheduling policy: BATCH/IDLE/OTHER. |
|
||||
| `--cstack MODE` | `cstack=MODE` | How to walk native frames (C stack). Possible modes are `fp` (Frame Pointer), `dwarf` (DWARF unwind info), `lbr` (Last Branch Record, available on Haswell since Linux 4.1), `vm`, `vmx` (HotSpot VM Structs) and `no` (do not collect C stack).<br><br>By default, C stack is shown in cpu, ctimer, wall-clock and perf-events profiles. Java-level events like `alloc` and `lock` collect only Java stack. |
|
||||
| `--signal NUM` | `signal=NUM` | Use alternative signal for cpu or wall clock profiling. To change both signals, specify two numbers separated by a slash: `--signal SIGCPU/SIGWALL`. |
|
||||
| `--clock SOURCE` | `clock=SOURCE` | Clock source for JFR timestamps: `tsc` (default) or `monotonic` (equivalent for `CLOCK_MONOTONIC`). |
|
||||
| `--begin function` | `begin=FUNCTION` | Automatically start profiling when the specified native function is executed. |
|
||||
| `--end function` | `end=FUNCTION` | Automatically stop profiling when the specified native function is executed. |
|
||||
| `--ttsp` | `ttsp` | Time-to-safepoint profiling. An alias for `--begin SafepointSynchronize::begin --end RuntimeService::record_safepoint_synchronized`.<br>It is not a separate event type, but rather a constraint. Whatever event type you choose (e.g. `cpu` or `wall`), the profiler will work as usual, except that only events between the safepoint request and the start of the VM operation will be recorded. |
|
||||
| `--nostop` | `nostop` | Record profiling window between `--begin` and `--end`, but do not stop profiling outside window. |
|
||||
| `--libpath PATH` | `libpath=PATH` | Full path to `libasyncProfiler.so` (useful when profiling a container from the host). |
|
||||
| `--filter FILTER` | `filter=FILTER` | In the wall-clock profiling mode, profile only threads with the specified ids.<br>Example: `asprof -e wall -d 30 --filter 120-127,132,134 Computey` |
|
||||
| `--fdtransfer` | `fdtransfer` | Run a background process that provides access to perf_events to an unprivileged process. `--fdtransfer` is useful for profiling a process in a container (which lacks access to perf_events) from the host.<br>See [Profiling Java in a container](ProfilingInContainer.md). |
|
||||
| `--target-cpu` | `target-cpu` | In perf_events profiling mode, instruct the profiler to only sample threads running on the specified CPU, defaults to -1.<br>Example: `asprof --target-cpu 3`. |
|
||||
| `--record-cpu` | `record-cpu` | In perf_events profiling mode, instruct the profiler to capture which CPU a sample was taken on. |
|
||||
| `-v --version` | `version` | Prints the version of profiler library. If PID is specified, gets the version of the library loaded into the given process. |
|
||||
|
||||
## Options applicable to JFR output only
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| ------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--chunksize N` | `chunksize=N` | Approximate size for a single JFR chunk. A new chunk will be started whenever specified size is reached. The default `chunksize` is 100MB.<br>Example: `asprof -f profile.jfr --chunksize 100m 8983` |
|
||||
| `--chunktime N` | `chunktime=N` | Approximate time limit for a single JFR chunk. A new chunk will be started whenever specified time limit is reached. The default `chunktime` is 1 hour.<br>Example: `asprof -f profile.jfr --chunktime 1h 8983` |
|
||||
| `--jfropts OPTIONS` | `jfropts=OPTIONS` | Comma separated list of JFR recording options. Currently, the only available option is `mem` supported on Linux 3.17+. `mem` enables accumulating events in memory instead of flushing synchronously to a file. |
|
||||
| `--jfrsync CONFIG` | `jfrsync[=CONFIG]` | Start Java Flight Recording with the given configuration synchronously with the profiler. The output .jfr file will include all regular JFR events, except that execution samples will be obtained from async-profiler. This option implies `-o jfr`.<br>`CONFIG` is a predefined JFR profile or a JFR configuration file (.jfc) or a list of JFR events started with `+`.<br><br>Example: `asprof -e cpu --jfrsync profile -f combined.jfr 8983` |
|
||||
| `--all` | `all` | Shorthand for enabling `cpu`, `wall`, `alloc`, `live`, `nativemem` and `lock` profiling simultaneously. This can be combined with `--alloc 2m --lock 10ms` etc. to pass custom interval/threshold. It is also possible to combine it with `-e` argument to change the type of event being collected (default is `cpu`). This is not recommended for production, especially for continuous profiling. |
|
||||
|
||||
## Options applicable to FlameGraph and Tree view outputs only
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| -------------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--title TITLE` | `title=TITLE` | Custom title of a FlameGraph.<br>Example: `asprof -f profile.html --title "Sample CPU profile" 8983` |
|
||||
| `--minwidth PERCENT` | `minwidth=PERCENT` | Minimum frame width as a percentage. Smaller frames will not be visible.<br>Example: `asprof -f profile.html --minwidth 0.5 8983` |
|
||||
| `--reverse` | `reverse` | Reverse stack traces (defaults to icicle graph).<br>Example: `asprof -f profile.html --reverse 8983` |
|
||||
| `--inverted` | `inverted` | Toggles the layout for reversed stacktraces from icicle to flamegraph and for default stacktraces from flamegraph to icicle.<br>Example: `asprof -f profile.html --inverted 8983` |
|
||||
|
||||
Notice that `--reverse` and `--inverted` are orthogonal settings. By default, flamegraphs grow from bottom to top (because flames grow from bottom to top). The outermost frames (e.g. the `main()` function) are shown at the bottom while the innermost, leaf frames are shown at the top. If such a flame graph is mirrored on the y-axis, it becomes an icicle graph (icicles grow top-down). The default setting for this layout can be toggled with the `--inverted` option when the graph is created or changed later with the `Invert` button which is located in the upper-left corner of the generated HTML page, when the graph is displayed.
|
||||
|
||||
By default, async-profiler merges stack traces starting from the outermost (e.g. `main()`) frames and displays them from bottom to top in a flamegraph. The `--reverse` option can be used to create reverse stack traces, i.e. merge them starting with the innermost, leaf frames. By default, reversed stack traces are displayed from top to bottom as icicle graphs. The default layout setting for both normal and reversed stack traces can be changed with the `--inverted` option.
|
||||
|
||||
## Options applicable to any output format except JFR
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| -------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-t --threads` | `threads` | Profile threads separately. Each stack trace will end with a frame that denotes a single thread.<br>Example: `asprof -t 8983` |
|
||||
| `-s --simple` | `simple` | Print simple class names instead of fully qualified names. |
|
||||
| `-n --norm` | `norm` | Normalize names of hidden classes / lambdas. |
|
||||
| `-g --sig` | `sig` | Print method signatures. |
|
||||
| `-l --lib` | `lib` | Prepend library names to symbols, e.g. ``libjvm.so`JVM_DefineClassWithSource``. |
|
||||
| `--total` | `total` | Count the total value of the collected metric instead of the number of samples, e.g. total allocation size. |
|
||||
| `-a --ann` | `ann` | Annotate JIT compiled methods with `_[j]`, inlined methods with `_[i]`, interpreted methods with `_[0]` and C1 compiled methods with `_[1]`. FlameGraph and Tree view will color frames depending on their type regardless of this option. |
|
||||
|
||||
## Appendix
|
||||
|
||||
### Dump Option
|
||||
|
||||
`-o fmt` - specifies what information to dump when profiling ends.
|
||||
`fmt` can be one of the following options:
|
||||
|
||||
- `traces[=N]` - dump call traces (at most N samples);
|
||||
- `flat[=N]` - dump flat profile (top N hot methods);
|
||||
- can be combined with `traces`, e.g. `traces=200,flat=200`
|
||||
- `jfr` - dump events in JDK Flight Recorder format readable by JDK Mission Control.
|
||||
- `collapsed` - dump collapsed call traces in the format used by
|
||||
[FlameGraph](https://github.com/brendangregg/FlameGraph) script. This is
|
||||
a collection of call stacks, where each line is a semicolon separated list
|
||||
of frames followed by a counter.
|
||||
- `flamegraph` - produce Flame Graph in HTML format.
|
||||
- `tree` - produce Call Tree in HTML format.
|
||||
- `--reverse` option will generate backtrace view.
|
||||
|
||||
It is possible to specify multiple dump options at the same time.
|
||||
@@ -1,24 +0,0 @@
|
||||
# Profiling Java in a container
|
||||
|
||||
async-profiler provides the ability to profile Java processes running in a Docker or LXC
|
||||
container both from within a container and from the host system.
|
||||
|
||||
When profiling from the host, `pid` should be the Java process ID in the host
|
||||
namespace. Use `ps aux | grep java` or `docker top <container>` to find
|
||||
the process ID.
|
||||
|
||||
async-profiler should be run from the host by a privileged user - it will
|
||||
automatically switch to the proper pid/mount namespace and change
|
||||
user credentials to match the target process. Also make sure that
|
||||
the target container can access `libasyncProfiler.so` by the same
|
||||
absolute path as on the host. Alternatively, specify `--libpath` option
|
||||
to override path to `libasyncProfiler.so` in a container.
|
||||
|
||||
By default, Docker container restricts the access to `perf_event_open`
|
||||
syscall. There are 3 alternatives to allow profiling in a container:
|
||||
|
||||
1. You can modify the [seccomp profile](https://docs.docker.com/engine/security/seccomp/)
|
||||
or disable it altogether with `--security-opt seccomp=unconfined` option. In
|
||||
addition, `--cap-add SYS_ADMIN` may be required.
|
||||
2. You can use "fdtransfer": see the help for `--fdtransfer`.
|
||||
3. Last, you may fall back to `-e ctimer` profiling mode, see [Troubleshooting](Troubleshooting.md).
|
||||
@@ -1,308 +0,0 @@
|
||||
# Profiling modes
|
||||
|
||||
Besides CPU time, async-profiler provides various other profiling modes such as `Allocation`, `Wall Clock`, `Java Method`
|
||||
and even a `Multiple Events` profiling mode.
|
||||
|
||||
## CPU profiling
|
||||
|
||||
In this mode, profiler collects stack trace samples that include **Java** methods,
|
||||
**native** calls, **JVM** code and **kernel** functions.
|
||||
|
||||
The general approach is receiving call stacks generated by `perf_events`
|
||||
and matching them up with call stacks generated by `AsyncGetCallTrace`,
|
||||
in order to produce an accurate profile of both Java and native code.
|
||||
Additionally, async-profiler provides a workaround to recover stack traces
|
||||
in some [corner cases](https://bugs.openjdk.java.net/browse/JDK-8178287)
|
||||
where `AsyncGetCallTrace` fails.
|
||||
|
||||
This approach has the following advantages compared to using `perf_events`
|
||||
directly with a Java agent that translates addresses to Java method names:
|
||||
|
||||
- Does not require `-XX:+PreserveFramePointer`, which introduces
|
||||
performance overhead that can be sometimes as high as 10%.
|
||||
|
||||
- Does not require starting JVM with an agent for translating Java code addresses
|
||||
to method names.
|
||||
|
||||
- Displays interpreter frames.
|
||||
|
||||
- Does not produce large intermediate files (perf.data) for further processing in
|
||||
user space scripts.
|
||||
|
||||
If you wish to resolve frames within `libjvm`, the [debug symbols](#installing-debug-symbols) are required.
|
||||
|
||||
## ALLOCATION profiling
|
||||
|
||||
The profiler can be configured to collect call sites where the largest amount
|
||||
of heap memory is allocated.
|
||||
|
||||
async-profiler does not use intrusive techniques like bytecode instrumentation
|
||||
or expensive DTrace probes which have significant performance impact.
|
||||
It also does not affect Escape Analysis or prevent from JIT optimizations
|
||||
like allocation elimination. Only actual heap allocations are measured.
|
||||
|
||||
The profiler features TLAB-driven sampling. It relies on HotSpot-specific
|
||||
callbacks to receive two kinds of notifications:
|
||||
|
||||
- when an object is allocated in a newly created TLAB;
|
||||
- when an object is allocated on a slow path outside TLAB.
|
||||
|
||||
Sampling interval can be adjusted with `--alloc` option.
|
||||
For example, `--alloc 500k` will take one sample after 500 KB of allocated
|
||||
space on average. Prior to JDK 11, intervals less than TLAB size will not take effect.
|
||||
|
||||
In allocation profiling mode, the top frame of every call trace is the class
|
||||
of the allocated object, and the counter is the heap pressure (the total size
|
||||
of allocated TLABs or objects outside TLAB).
|
||||
|
||||
### Installing Debug Symbols
|
||||
|
||||
Prior to JDK 11, the allocation profiler required HotSpot debug symbols.
|
||||
Some OpenJDK distributions (Amazon Corretto, Liberica JDK, Azul Zulu)
|
||||
already have them embedded in `libjvm.so`, other OpenJDK builds typically
|
||||
provide debug symbols in a separate package. For example, to install
|
||||
OpenJDK debug symbols on Debian / Ubuntu, run:
|
||||
|
||||
```
|
||||
# apt install openjdk-17-dbg
|
||||
```
|
||||
|
||||
(replace `17` with the desired version of JDK).
|
||||
|
||||
On CentOS, RHEL and some other RPM-based distributions, this could be done with
|
||||
[debuginfo-install](http://man7.org/linux/man-pages/man1/debuginfo-install.1.html) utility:
|
||||
|
||||
```
|
||||
# debuginfo-install java-1.8.0-openjdk
|
||||
```
|
||||
|
||||
On Gentoo, the `icedtea` OpenJDK package can be built with the per-package setting
|
||||
`FEATURES="nostrip"` to retain symbols.
|
||||
|
||||
The `gdb` tool can be used to verify if debug symbols are properly installed for the `libjvm` library.
|
||||
For example, on Linux:
|
||||
|
||||
```
|
||||
$ gdb $JAVA_HOME/lib/server/libjvm.so -ex 'info address UseG1GC'
|
||||
```
|
||||
|
||||
This command's output will either contain `Symbol "UseG1GC" is at 0xxxxx`
|
||||
or `No symbol "UseG1GC" in current context`.
|
||||
|
||||
## Native memory leaks
|
||||
|
||||
The profiling mode `nativemem` records `malloc`, `realloc`, `calloc` and `free` calls
|
||||
with the addresses, so that allocations can be matched with frees. This helps to focus
|
||||
the profile report only on unfreed allocations, which are likely to be the source of a memory leak.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
asprof start -e nativemem -f app.jfr <YourApp>
|
||||
# or
|
||||
asprof start --nativemem N -f app.jfr <YourApp>
|
||||
# or if only allocation calls are interesting, do not collect free calls:
|
||||
asprof start --nativemem N --nofree -f app.jfr <YourApp>
|
||||
|
||||
asprof stop <YourApp>
|
||||
```
|
||||
|
||||
Now we need to process the jfr file, to find native memory leaks:
|
||||
|
||||
```
|
||||
# --total for bytes, default counts invocations.
|
||||
jfrconv --total --nativemem --leak app.jfr app-leak.html
|
||||
|
||||
# No leak analysis, include all native allocations:
|
||||
jfrconv --total --nativemem app.jfr app-malloc.html
|
||||
```
|
||||
|
||||
When `--leak` option is used, the generated flame graph will show allocations without matching `free` calls. If `--nofree` is specified, every allocation will be reported as a leak:
|
||||
|
||||

|
||||
|
||||
The overhead of `nativemem` profiling depends on the number of native allocations,
|
||||
but is usually small enough even for production use. If required, the overhead can be reduced
|
||||
by configuring the profiling interval. E.g. if you add `nativemem=1m` profiler option,
|
||||
allocation samples will be limited to at most one sample per allocated megabyte.
|
||||
|
||||
### Using LD_PRELOAD for finding native memory leaks
|
||||
|
||||
Similar to Java applications, `nativemem` mode can be also used with [non-Java processes](ProfilingNonJavaApplications.md).
|
||||
|
||||
Run an application with `nativemem` profiler that dumps recordings in JFR format every 10 minutes:
|
||||
|
||||
```
|
||||
LD_PRELOAD=/path/to/libasyncProfiler.so ASPROF_COMMAND=start,nativemem,total,loop=10m,cstack=dwarf,file=profile-%t.jfr NativeApp [args]
|
||||
```
|
||||
|
||||
Then run `jfrconv` to generate memory leak report as a flame graph:
|
||||
|
||||
```
|
||||
jfrconv --total --nativemem --leak <profile>.jfr <profile>-leak.html
|
||||
```
|
||||
|
||||
## Wall-clock profiling
|
||||
|
||||
`-e wall` option tells async-profiler to sample all threads equally every given
|
||||
period of time regardless of thread status: Running, Sleeping or Blocked.
|
||||
For instance, this can be helpful when profiling application start-up time.
|
||||
|
||||
Wall-clock profiler is most useful in per-thread mode: `-t`.
|
||||
|
||||
Example: `asprof -e wall -t -i 50ms -f result.html 8983`
|
||||
|
||||
## Lock profiling
|
||||
|
||||
`-e lock` option tells async-profiler to measure lock contention in the profiled application. Lock profiling can help
|
||||
developers understand lock acquisition patterns, lock contention (when threads have to wait to acquire locks), time
|
||||
spent waiting for locks and which code paths are blocked due to locks.
|
||||
|
||||
In lock profiling mode, the top frame is the class of lock/monitor, and the counter is the number of nanoseconds it took to
|
||||
enter this lock/monitor.
|
||||
|
||||
Example: `asprof -e lock -t -i 5ms -f result.html 8983`
|
||||
|
||||
## Java method profiling
|
||||
|
||||
`-e ClassName.methodName` option instruments the given Java method
|
||||
in order to record all invocations of this method with the stack traces.
|
||||
|
||||
Example: `-e java.util.Properties.getProperty` will profile all places
|
||||
where `getProperty` method is called from.
|
||||
|
||||
Only non-native Java methods are supported. To profile a native method,
|
||||
use hardware breakpoint event instead, e.g. `-e Java_java_lang_Throwable_fillInStackTrace`
|
||||
|
||||
**Be aware** that if you attach async-profiler at runtime, the first instrumentation
|
||||
of a non-native Java method may cause the [deoptimization](https://github.com/openjdk/jdk/blob/bf2e9ee9d321ed289466b2410f12ad10504d01a2/src/hotspot/share/prims/jvmtiRedefineClasses.cpp#L4092-L4096)
|
||||
of all compiled methods. The subsequent instrumentation flushes only the _dependent code_.
|
||||
|
||||
The massive CodeCache flush doesn't occur if attaching async-profiler as an agent.
|
||||
|
||||
### Java native method profiling
|
||||
|
||||
Here are some useful native methods to profile:
|
||||
|
||||
- `G1CollectedHeap::humongous_obj_allocate` - trace _humongous allocations_ of the G1 GC,
|
||||
- `JVM_StartThread` - trace creation of new Java threads,
|
||||
- `Java_java_lang_ClassLoader_defineClass1` - trace class loading.
|
||||
|
||||
## Multiple events
|
||||
|
||||
It is possible to profile CPU, allocations, and locks at the same time.
|
||||
Instead of CPU, you may choose any other execution event: wall-clock,
|
||||
perf event, tracepoint, Java method, etc.
|
||||
|
||||
The only output format that supports multiple events together is JFR.
|
||||
The recording will contain the following event types:
|
||||
|
||||
- `jdk.ExecutionSample`
|
||||
- `jdk.ObjectAllocationInNewTLAB` (alloc)
|
||||
- `jdk.ObjectAllocationOutsideTLAB` (alloc)
|
||||
- `jdk.JavaMonitorEnter` (lock)
|
||||
- `jdk.ThreadPark` (lock)
|
||||
|
||||
To start profiling cpu + allocations + locks together, specify
|
||||
|
||||
```
|
||||
asprof -e cpu,alloc,lock -f profile.jfr ...
|
||||
```
|
||||
|
||||
or use `--alloc` and `--lock` parameters with the desired threshold:
|
||||
|
||||
```
|
||||
asprof -e cpu --alloc 2m --lock 10ms -f profile.jfr ...
|
||||
```
|
||||
|
||||
The same, when starting profiler as an agent:
|
||||
|
||||
```
|
||||
-agentpath:/path/to/libasyncProfiler.so=start,event=cpu,alloc=2m,lock=10ms,file=profile.jfr
|
||||
```
|
||||
|
||||
### Multi-event profiling using `--all`
|
||||
|
||||
The `--all` flag offers a way to simultaneously enable predefined collection of common profiling events. By default, `--all` activates profiling for `cpu`, `wall`, `alloc`, `live`, `lock` and `nativemem`.
|
||||
|
||||
**Important consideration**
|
||||
|
||||
While the `--all` flag can be useful for development environments to get a wide overview, it is not recommended to enable this in production, especially for continuous profiling. Users are invited to select carefully what to profile and with what settings.
|
||||
|
||||
**Sample command:**
|
||||
|
||||
This command enables the default set of events included in `--all`:
|
||||
|
||||
```
|
||||
asprof --all -f profile.jfr
|
||||
```
|
||||
|
||||
or combine it with `--alloc`/`--wall`/`--lock`/`--nativemem` options to override individual settings. For example:
|
||||
|
||||
```
|
||||
asprof --all --alloc 2m --lock 10ms -f profile.jfr
|
||||
```
|
||||
|
||||
The same, when starting profiler as an agent:
|
||||
|
||||
```
|
||||
-agentpath:/path/to/libasyncProfiler.so=start,all,alloc=2m,lock=10ms,file=profile.jfr
|
||||
```
|
||||
|
||||
Instead of `cpu`, it is possible to override the `--all` parameter with any other event type of your choice. For instance, the following command will profile `cycles` along with ` wall`, `alloc`, `live`, `lock` and `nativemem`:
|
||||
|
||||
```
|
||||
asprof --all -e cycles -f profile.jfr
|
||||
```
|
||||
|
||||
## Continuous profiling
|
||||
|
||||
Continuous profiling is a means by which an application can be profiled
|
||||
continuously and dump profile results every specified time period.
|
||||
It is a very effective technique in finding performance degradations proactively
|
||||
and efficiently. Continuous profiling helps users to understand performance
|
||||
differences between versions of the same application. Recent outputs can
|
||||
be compared with continuous profiling output history to find differences
|
||||
and optimize the changes introduced in case of performance degradations.
|
||||
async-profiler provides the ability to continuously profile an application with
|
||||
the `loop` option. Make sure the filename includes a timestamp pattern, or the
|
||||
output will be overwritten on each iteration.
|
||||
|
||||
```
|
||||
asprof --loop 1h -f /var/log/profile-%t.jfr 8983
|
||||
```
|
||||
|
||||
## perf event types supported on Linux
|
||||
|
||||
| Usage | Description |
|
||||
| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Predefined: | |
|
||||
| `-e cpu-clock` | High-resolution per-CPU timer. Similar to `-e cpu` but forces using perf_events. |
|
||||
| `-e page-faults` | Software page faults |
|
||||
| `-e context-switches` | Context switches |
|
||||
| `-e cycles` | Total CPU cycles |
|
||||
| `-e instructions` | Retired CPU instructions |
|
||||
| `-e cache-references` | Cache accesses (usually Last Level Cache, but may depend on the architecture) |
|
||||
| `-e cache-misses` | Cache accesses requiring fetching data from a higher-level cache or main memory |
|
||||
| `-e branch-instructions` | Retired branch instructions |
|
||||
| `-e branch-misses` | Mispredicted branch instructions |
|
||||
| `-e bus-cycles` | Bus cycles |
|
||||
| `-e L1-dcache-load-misses` | Cache misses on Level 1 Data Cache |
|
||||
| `-e LLC-load-misses` | Cache misses on the Last Level Cache |
|
||||
| `-e dTLB-load-misses` | Data load misses on the Translation Lookaside Buffer |
|
||||
| Breakpoint: | |
|
||||
| `-e mem:<addr>` | Breakpoint on a decimal or hex (0x) address |
|
||||
| `-e mem:<func>` | Breakpoint on a public or a private symbol |
|
||||
| `-e mem:<func>[+<offset>][/<len>][:rwx>]` | Breakpoint on a symbol or an address with offset, length and read/write/exec. Address, offset and length can be hex or dec. The format of `mem` event is the same as in [`perf-record`](https://man7.org/linux/man-pages/man1/perf-record.1.html). |
|
||||
| `-e <symbol>` | Equivalent to an execution breakpoint on a symbol: `mem:<symbol>:x`. Example: `-e strcmp` will trace all calls of native `strcmp` function. |
|
||||
| Tracepoint: | |
|
||||
| `-e trace:<id>` | Kernel tracepoint with the given numeric id |
|
||||
| `-e <tracepoint>` | Kernel tracepoint with the specified name. Example: `-e syscalls:sys_enter_open` will trace all `open` syscalls. |
|
||||
| Probes: | |
|
||||
| `-e kprobe:<func>[+<offset>]` | Kernel probe. Example: `-e kprobe:do_sys_open`. |
|
||||
| `-e kretprobe:<func>[+<offset>]` | Kernel return probe. Example: `-e kretprobe:do_sys_open`. |
|
||||
| `-e uprobe:<func>[+<offset>]` | Userspace probe. Example: `-e uprobe:/usr/lib64/libc-2.17.so+0x114790`. |
|
||||
| `-e uretprobe:<func>[+<offset>]` | Userspace return probe |
|
||||
| PMU: | |
|
||||
| `-e r<NNN>` | Architecture-specific PMU event with the given number. Example: `-e r4d2` selects `MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM` event, which corresponds to event 0xd2, umask 0x4. |
|
||||
| `-e <pmu descriptor>` | PMU event descriptor. Example: `-e cpu/cache-misses/`, `-e cpu/event=0xd2,umask=4/`. The same syntax can be used for uncore and vendor-specific events, e.g. `amd_l3/event=0x01,umask=0x80/` |
|
||||
@@ -1,95 +0,0 @@
|
||||
# Profiling Non-Java applications
|
||||
|
||||
The scope of profiling non-Java applications is limited to the case when profiler is controlled
|
||||
programmatically from the process being profiled or with `LD_PRELOAD`. It is worth noting that
|
||||
[dynamic attach](IntegratingAsyncProfiler.md#launching-as-an-agent)
|
||||
which is available for Java is not supported for non-Java profiling.
|
||||
|
||||
## LD_PRELOAD
|
||||
|
||||
async-profiler can be injected into a native application through the `LD_PRELOAD` mechanism:
|
||||
|
||||
```
|
||||
LD_PRELOAD=/path/to/libasyncProfiler.so ASPROF_COMMAND=start,event=cpu,file=profile.jfr NativeApp [args]
|
||||
```
|
||||
|
||||
All basic functionality remains the same. Profiler can run in `cpu`, `wall`, `nativemem` and other perf_events
|
||||
modes. Flame Graph and JFR output formats are supported, although JFR files will obviously lack
|
||||
Java-specific events.
|
||||
|
||||
See [Profiling Modes](ProfilingModes.md) for more examples.
|
||||
|
||||
## Controlling async-profiler via the C API
|
||||
|
||||
Similar to the
|
||||
[Java API](IntegratingAsyncProfiler.md#using-java-api),
|
||||
there is a C API for using profiler inside a native application.
|
||||
|
||||
Header file for the API is bundled in the async-profiler release package under [`include/asprof.h`](../src/asprof.h).
|
||||
|
||||
To use it in a C/C++ application, include the mentioned `asprof.h`. Below is an example showing how to invoke async-profiler with the API:
|
||||
|
||||
```
|
||||
#include "asprof.h"
|
||||
#include <dlfcn.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
void test_output_callback(const char* buffer, size_t size) {
|
||||
fwrite(buffer, sizeof(char), size, stderr);
|
||||
}
|
||||
|
||||
int main() {
|
||||
void* lib = dlopen("/path/to/libasyncProfiler.so", RTLD_NOW);
|
||||
if (lib == NULL) {
|
||||
printf("%s\n", dlerror());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
asprof_init_t asprof_init = (asprof_init_t)dlsym(lib, "asprof_init");
|
||||
asprof_execute_t asprof_execute = (asprof_execute_t)dlsym(lib, "asprof_execute");
|
||||
asprof_error_str_t asprof_error_str = (asprof_error_str_t)dlsym(lib, "asprof_error_str");
|
||||
|
||||
if (asprof_init == NULL || asprof_execute == NULL || asprof_error_str == NULL) {
|
||||
printf("%s\n", dlerror());
|
||||
dlclose(lib);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
asprof_init();
|
||||
|
||||
printf("Starting profiler\n");
|
||||
|
||||
char cmd[] = "start,event=cpu,loglevel=debug,file=profile.jfr";
|
||||
asprof_error_t err = asprof_execute(cmd, test_output_callback);
|
||||
if (err != NULL) {
|
||||
fprintf(stderr, "%s\n", asprof_error_str(err));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// ... some meaningful work ...
|
||||
|
||||
printf("Stopping profiler\n");
|
||||
|
||||
err = asprof_execute("stop", test_output_callback);
|
||||
if (err != NULL) {
|
||||
fprintf(stderr, "%s\n", asprof_error_str(err));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
## Unstable APIs
|
||||
|
||||
These APIs are unstable and might change or be removed in the next version of async-profiler.
|
||||
|
||||
### Advanced Sampling
|
||||
|
||||
The `asprof_get_thread_local_data` function returns a pointer to async-profiler's
|
||||
thread-local data structure. The structure is guaranteed to live as long as the thread.
|
||||
|
||||
The returned structure contains a pointer that increments every time there is a sample. This gives
|
||||
native code an easy way to detect when a sample event had occurred, and to log metadata about what the
|
||||
program was doing when the event happened.
|
||||
@@ -1,64 +0,0 @@
|
||||
# Stack Walking Modes
|
||||
|
||||
## Frame Pointer
|
||||
|
||||
The default stack walking mode in async-profiler, `Frame Pointer (FP)` stack walking, is a technique for collecting call
|
||||
stacks by tracking frame pointers in memory. Each function call maintains a pointer to its caller's stack frame, creating
|
||||
a linked chain that can be traversed to reconstruct the program's execution path. It's particularly efficient as it is
|
||||
very fast compared to other stack walking methods introducing less overhead but requires code to be compiled with frame
|
||||
pointers enabled (`-fno-omit-frame-pointer`).
|
||||
|
||||
## DWARF
|
||||
|
||||
DWARF stack walking is a method to reconstruct call stacks using unwinding information embedded in executables
|
||||
(typically in `.eh_frame` section). Unlike frame-pointer-based unwinding, it works reliably even with optimized code
|
||||
where frame pointers are omitted.
|
||||
|
||||
DWARF unwinding requires extra memory (e.g. the lookup table for `libjvm.so` is about 2MB).
|
||||
It is also slower than the traditional FP-based stack walker, but it's still fast enough for on-the-fly unwinding
|
||||
due to being signal safe in async-profiler.
|
||||
|
||||
The feature can be enabled with the option `--cstack dwarf` (or its agent equivalent `cstack=dwarf`).
|
||||
|
||||
## LBR
|
||||
|
||||
Modern Intel CPUs can profile branch instructions, including `call`s and `ret`s, and store their source and destination
|
||||
addresses (Last Branch Records) in hardware registers. Starting from Haswell, CPU can match these addresses to form a
|
||||
branch stack. This branch stack will be effectively a call chain automatically collected by the hardware.
|
||||
|
||||
LBR stacks are not always complete or accurate, but they still appear much more helpful compared to FP-based stack
|
||||
walking, when a native library is compiled with omitted frame pointers. It works only with hardware events like
|
||||
`-e cycles` (`instructions`, `cache-misses` etc.) and the maximum call chain depth is 32 (hardware limit).
|
||||
|
||||
The feature can be enabled with the option `--cstack lbr` (or its agent equivalent `cstack=lbr`).
|
||||
|
||||
## VM Structs
|
||||
|
||||
async-profiler can leverage JVM internal structures to replicate the logic of Java stack walking
|
||||
in the profiler itself without depending on the unstable JVM API.
|
||||
|
||||
This mode of stack walking has been introduced in async-profiler due to issues with `AsyncGetCallTrace`.
|
||||
AsyncGetCallTrace (AGCT) is a non-standard extension of HotSpot JVM to obtain Java stack traces outside safepoints.
|
||||
async-profiler had been relying on AGCT heavily, and it even got its name after this function.
|
||||
|
||||
`AsyncGetCallTrace` being non-API, was never supported in OpenJDK well enough, it did not receive enough testing, it was
|
||||
broken several times even in minor JDK updates, e.g. [JDK-8307549](https://bugs.openjdk.org/browse/JDK-8307549).
|
||||
|
||||
AsyncGetCallTrace is notorious for its inability to walk Java stack in different corner cases. There is a long-standing
|
||||
bug [JDK-8178287](https://bugs.openjdk.org/browse/JDK-8178287) with several examples. But the worst aspect is that
|
||||
AsyncGetCallTrace can crash JVM, and there is no reliable way to get around this outside the JVM.
|
||||
|
||||
Due to issues with AGCT from time to time, including random crashes and missing stack traces,
|
||||
`vm` stack walking mode based on HotSpot VM Structs was introduced in async-profiler.
|
||||
`vm` stack walker has the following advantages:
|
||||
|
||||
- Fully enclosed by the crash protection based on `setjmp`/`longjmp`.
|
||||
- Can show all frames: Java, native and JVM stubs throughout the whole stack.
|
||||
- Provides additional information on each frame, like JIT compilation type.
|
||||
|
||||
The feature can be enabled with the option `--cstack vm` (or its agent equivalent `cstack=vm`).
|
||||
|
||||
Another variant of this option: `--cstack vmx` activates an "expert" unwinding based on VM Structs.
|
||||
With this option, async-profiler collects mixed stack traces that have Java and native frames interleaved.
|
||||
|
||||
The maximum stack depth for `vm` or `vmx` stack walking is controlled with `-j depth` option.
|
||||
@@ -1,133 +0,0 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Error Messages
|
||||
|
||||
### perf_event mmap failed: Operation not permitted
|
||||
|
||||
Profiler allocates 8 kB perf_event buffer for each thread of the target process.
|
||||
The above error may appear if the total size of perf_event buffers (`8 * threads` kB)
|
||||
exceeds locked memory limit. This limit is comprised of `ulimit -l` plus
|
||||
the value of `kernel.perf_event_mlock_kb` sysctl multiplied by the number of CPU cores.
|
||||
For example, on a 16-core machine, `ulimit -l 65536` and `kernel.perf_event_mlock_kb=516`
|
||||
is enough for profiling `(65536 + 516*16) / 8 = 9224` threads.
|
||||
If an application has more threads, increase one of the above limits, or native stacks
|
||||
will not be collected for some threads.
|
||||
|
||||
A privileged process is not subject to the locked memory limit.
|
||||
|
||||
### Failed to change credentials to match the target process: Operation not permitted
|
||||
|
||||
Due to limitation of HotSpot Dynamic Attach mechanism, the profiler must be run
|
||||
by exactly the same user (and group) as the owner of target JVM process.
|
||||
If profiler is run by a different user, it will try to automatically change
|
||||
current user and group. This will likely succeed for `root`, but not for
|
||||
other users, resulting in the above error.
|
||||
|
||||
### Could not start attach mechanism: No such file or directory
|
||||
|
||||
The profiler cannot establish communication with the target JVM through UNIX domain socket.
|
||||
Usually this happens in one of the following cases:
|
||||
|
||||
1. Attach socket `/tmp/.java_pidNNN` has been deleted. It is a common
|
||||
practice to clean `/tmp` automatically with some scheduled script.
|
||||
Configure the cleanup software to exclude `.java_pid*` files from deletion.
|
||||
|
||||
- How to check: run `lsof -p PID | grep java_pid`. If it lists a socket file, but the file does not exist, then this is exactly
|
||||
the described problem.
|
||||
|
||||
2. JVM is started with `-XX:+DisableAttachMechanism` option.
|
||||
3. `/tmp` directory of Java process is not physically the same directory
|
||||
as `/tmp` of your shell, because Java is running in a container or in
|
||||
`chroot` environment. `asprof` attempts to solve this automatically,
|
||||
but it might lack the required permissions to do so.
|
||||
- Check `strace asprof PID jcmd`
|
||||
4. JVM is busy and cannot reach a safepoint. For instance,
|
||||
JVM is in the middle of long-running garbage collection.
|
||||
- How to check: run `kill -3 PID`. Healthy JVM process should print
|
||||
a thread dump and heap info in its console.
|
||||
|
||||
### Target JVM failed to load libasyncProfiler.so
|
||||
|
||||
The connection with the target JVM has been established, but JVM is unable to load profiler shared library.
|
||||
Make sure the user of JVM process has permissions to access `libasyncProfiler.so` by exactly the same absolute path.
|
||||
For more information see [#78](https://github.com/async-profiler/async-profiler/issues/78).
|
||||
|
||||
### Perf events unavailable
|
||||
|
||||
`perf_event_open()` syscall has failed. Typical reasons include:
|
||||
|
||||
1. `/proc/sys/kernel/perf_event_paranoid` is set to restricted mode (>=2).
|
||||
2. seccomp disables `perf_event_open` API in a container.
|
||||
3. OS runs under a hypervisor that does not virtualize performance counters.
|
||||
4. perf_event_open API is not supported on this system, e.g. WSL.
|
||||
|
||||
<br>For permissions-related reasons (such as 1 and 2), using `--fdtransfer` while running the profiler
|
||||
as a privileged user may solve the issue.
|
||||
|
||||
If changing the configuration is not possible, you may fall back to
|
||||
`-e ctimer` profiling mode. It is similar to `cpu` mode, but does not
|
||||
require perf_events support. As a drawback, there will be no kernel
|
||||
stack traces.
|
||||
|
||||
### No AllocTracer symbols found. Are JDK debug symbols installed?
|
||||
|
||||
The OpenJDK debug symbols are required for allocation profiling for applications developed
|
||||
with JDK prior to 11. See [Installing Debug Symbols](ProfilingModes.md#installing-debug-symbols) for more
|
||||
details. If the error message persists after a successful installation of the debug symbols,
|
||||
it is possible that the JDK was upgraded when installing the debug symbols.
|
||||
In this case, profiling any Java process which had started prior to the installation
|
||||
will continue to display this message, since the process had loaded
|
||||
the older version of the JDK which lacked debug symbols.
|
||||
Restarting the affected Java processes should resolve the issue.
|
||||
|
||||
### VMStructs unavailable. Unsupported JVM?
|
||||
|
||||
JVM shared library does not export `gHotSpotVMStructs*` symbols -
|
||||
apparently this is not a HotSpot JVM. Sometimes the same message
|
||||
can be also caused by an incorrectly built JDK
|
||||
(see [#218](https://github.com/async-profiler/async-profiler/issues/218)).
|
||||
In these cases installing JDK debug symbols may solve the problem.
|
||||
|
||||
### Could not parse symbols from <libname.so>
|
||||
|
||||
Async-profiler was unable to parse non-Java function names because of
|
||||
the corrupted contents in `/proc/[pid]/maps`. The problem is known to
|
||||
occur in a container when running Ubuntu with Linux kernel 5.x.
|
||||
This is the OS bug, see <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1843018>.
|
||||
|
||||
### Could not open output file
|
||||
|
||||
Output file is written by the target JVM process, not by the profiler script.
|
||||
Make sure the path specified in `-f` option is correct and is accessible by the JVM.
|
||||
|
||||
## Known Limitations
|
||||
|
||||
- No Java stacks will be collected if `-XX:MaxJavaStackTraceDepth` is zero
|
||||
or negative. The exception is `--cstack vm` mode, which does not take
|
||||
`MaxJavaStackTraceDepth` into account.
|
||||
|
||||
- Too short profiling interval may cause continuous interruption of heavy
|
||||
system calls like `clone()`, so that it will never complete;
|
||||
see [#97](https://github.com/async-profiler/async-profiler/issues/97).
|
||||
The workaround is simply to increase the interval.
|
||||
|
||||
- When agent is not loaded at JVM startup (by using -agentpath option) it is
|
||||
highly recommended to use `-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints` JVM flags.
|
||||
Without those flags the profiler will still work correctly but results might be
|
||||
less accurate. For example, without `-XX:+DebugNonSafepoints` there is a high chance
|
||||
that simple inlined methods will not appear in the profile. When the agent is attached at runtime,
|
||||
`CompiledMethodLoad` JVMTI event enables debug info, but only for methods compiled after attaching.
|
||||
|
||||
- On most Linux systems, `perf_events` captures call stacks with a maximum depth
|
||||
of 127 frames. On recent Linux kernels, this can be configured using
|
||||
`sysctl kernel.perf_event_max_stack` or by writing to the
|
||||
`/proc/sys/kernel/perf_event_max_stack` file.
|
||||
|
||||
- You will not see the non-Java frames _preceding_ the Java frames on the
|
||||
stack, unless `--cstack vmx` is specified.
|
||||
For example, if `start_thread` called `JavaMain` and then your Java
|
||||
code started running, you will not see the first two frames in the resulting
|
||||
stack. On the other hand, you _will_ see non-Java frames (user and kernel)
|
||||
invoked by your Java code.
|
||||
|
||||
- macOS profiling is limited to user space code only.
|
||||
@@ -1,118 +0,0 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>jfr-converter</artifactId>
|
||||
<version>4.0</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>async-profiler</name>
|
||||
<url>https://profiler.tools</url>
|
||||
<description>Low overhead sampling profiler for Java</description>
|
||||
|
||||
<licenses>
|
||||
<license>
|
||||
<name>Apache License Version 2.0</name>
|
||||
<url>http://www.apache.org/licenses/LICENSE-2.0</url>
|
||||
<distribution>repo</distribution>
|
||||
</license>
|
||||
</licenses>
|
||||
<scm>
|
||||
<url>https://github.com/async-profiler/async-profiler</url>
|
||||
<connection>scm:git:git@github.com:async-profiler/async-profiler.git</connection>
|
||||
<developerConnection>scm:git:git@github.com:async-profiler/async-profiler.git</developerConnection>
|
||||
</scm>
|
||||
<developers>
|
||||
<developer>
|
||||
<id>apangin</id>
|
||||
<name>Andrei Pangin</name>
|
||||
<email>noreply@pangin.pro</email>
|
||||
</developer>
|
||||
</developers>
|
||||
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
</properties>
|
||||
|
||||
<build>
|
||||
<sourceDirectory>src/converter</sourceDirectory>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/res</directory>
|
||||
</resource>
|
||||
</resources>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>3.8.1</version>
|
||||
<configuration>
|
||||
<release>8</release>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>3.3.0</version>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<mainClass>Main</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>3.2.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-sources</id>
|
||||
<goals>
|
||||
<goal>jar-no-fork</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>3.2.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-javadocs</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-gpg-plugin</artifactId>
|
||||
<version>1.6</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>sign-artifacts</id>
|
||||
<phase>verify</phase>
|
||||
<goals>
|
||||
<goal>sign</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<distributionManagement>
|
||||
<snapshotRepository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
|
||||
</snapshotRepository>
|
||||
<repository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2</url>
|
||||
</repository>
|
||||
</distributionManagement>
|
||||
</project>
|
||||
44
pom.xml
@@ -3,7 +3,7 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>async-profiler</artifactId>
|
||||
<version>4.0</version>
|
||||
<version>1.8.3</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>async-profiler</name>
|
||||
@@ -18,9 +18,9 @@
|
||||
</license>
|
||||
</licenses>
|
||||
<scm>
|
||||
<url>https://github.com/async-profiler/async-profiler</url>
|
||||
<connection>scm:git:git@github.com:async-profiler/async-profiler.git</connection>
|
||||
<developerConnection>scm:git:git@github.com:async-profiler/async-profiler.git</developerConnection>
|
||||
<url>https://github.com/jvm-profiling-tools/async-profiler</url>
|
||||
<connection>scm:git:git@github.com:jvm-profiling-tools/async-profiler.git</connection>
|
||||
<developerConnection>scm:git:git@github.com:jvm-profiling-tools/async-profiler.git</developerConnection>
|
||||
</scm>
|
||||
<developers>
|
||||
<developer>
|
||||
@@ -36,50 +36,20 @@
|
||||
|
||||
<build>
|
||||
<sourceDirectory>src/api</sourceDirectory>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>native</directory>
|
||||
</resource>
|
||||
</resources>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>3.8.1</version>
|
||||
<configuration>
|
||||
<release>8</release>
|
||||
<source>1.6</source>
|
||||
<target>1.6</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>3.3.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<classifier>${native.platform}</classifier>
|
||||
<includes>
|
||||
<include>${native.platform}/*</include>
|
||||
<include>one/**</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>3.2.0</version>
|
||||
<configuration>
|
||||
<excludes>
|
||||
<exclude>linux*/**</exclude>
|
||||
<exclude>macos*/**</exclude>
|
||||
</excludes>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-sources</id>
|
||||
@@ -129,4 +99,4 @@
|
||||
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2</url>
|
||||
</repository>
|
||||
</distributionManagement>
|
||||
</project>
|
||||
</project>
|
||||
282
profiler.sh
Executable file
@@ -0,0 +1,282 @@
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 [action] [options] <pid>"
|
||||
echo "Actions:"
|
||||
echo " start start profiling and return immediately"
|
||||
echo " resume resume profiling without resetting collected data"
|
||||
echo " stop stop profiling"
|
||||
echo " jstack get a thread dump"
|
||||
echo " check check if the specified profiling event is available"
|
||||
echo " status print profiling status"
|
||||
echo " list list profiling events supported by the target JVM"
|
||||
echo " collect collect profile for the specified period of time"
|
||||
echo " and then stop (default action)"
|
||||
echo "Options:"
|
||||
echo " -e event profiling event: cpu|alloc|lock|cache-misses etc."
|
||||
echo " -d duration run profiling for <duration> seconds"
|
||||
echo " -f filename dump output to <filename>"
|
||||
echo " -i interval sampling interval in nanoseconds"
|
||||
echo " -j jstackdepth maximum Java stack depth"
|
||||
echo " -t profile different threads separately"
|
||||
echo " -s simple class names instead of FQN"
|
||||
echo " -g print method signatures"
|
||||
echo " -a annotate Java method names"
|
||||
echo " -o fmt output format: flat|collapsed|html|tree|jfr"
|
||||
echo " -I include output only stack traces containing the specified pattern"
|
||||
echo " -X exclude exclude stack traces with the specified pattern"
|
||||
echo " -v, --version display version string"
|
||||
echo ""
|
||||
echo " --title string FlameGraph title"
|
||||
echo " --minwidth pct skip frames smaller than pct%"
|
||||
echo " --reverse generate stack-reversed FlameGraph / Call tree"
|
||||
echo ""
|
||||
echo " --all-kernel only include kernel-mode events"
|
||||
echo " --all-user only include user-mode events"
|
||||
echo " --cstack mode how to traverse C stack: fp|lbr|no"
|
||||
echo " --begin function begin profiling when function is executed"
|
||||
echo " --end function end profiling when function is executed"
|
||||
echo ""
|
||||
echo "<pid> is a numeric process ID of the target JVM"
|
||||
echo " or 'jps' keyword to find running JVM automatically"
|
||||
echo " or the application's name as it would appear in the jps tool"
|
||||
echo ""
|
||||
echo "Example: $0 -d 30 -f profile.svg 3456"
|
||||
echo " $0 start -i 999000 jps"
|
||||
echo " $0 stop -o flat jps"
|
||||
echo " $0 -d 5 -e alloc MyAppName"
|
||||
exit 1
|
||||
}
|
||||
|
||||
mirror_output() {
|
||||
# Mirror output from temporary file to local terminal
|
||||
if [ "$USE_TMP" = true ]; then
|
||||
if [ -f "$FILE" ]; then
|
||||
cat "$FILE"
|
||||
rm "$FILE"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
check_if_terminated() {
|
||||
if ! kill -0 "$PID" 2> /dev/null; then
|
||||
mirror_output
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
jattach() {
|
||||
set +e
|
||||
"$JATTACH" "$PID" load "$PROFILER" true "$1" > /dev/null
|
||||
RET=$?
|
||||
set -e
|
||||
|
||||
# Check if jattach failed
|
||||
if [ $RET -ne 0 ]; then
|
||||
if [ $RET -eq 255 ]; then
|
||||
echo "Failed to inject profiler into $PID"
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
otool -L "$PROFILER"
|
||||
else
|
||||
ldd "$PROFILER"
|
||||
fi
|
||||
fi
|
||||
exit $RET
|
||||
fi
|
||||
|
||||
mirror_output
|
||||
}
|
||||
|
||||
OPTIND=1
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" > /dev/null 2>&1; pwd -P)"
|
||||
JATTACH=$SCRIPT_DIR/build/jattach
|
||||
PROFILER=$SCRIPT_DIR/build/libasyncProfiler.so
|
||||
ACTION="collect"
|
||||
EVENT="cpu"
|
||||
DURATION="60"
|
||||
FILE=""
|
||||
USE_TMP="true"
|
||||
OUTPUT=""
|
||||
FORMAT=""
|
||||
PARAMS=""
|
||||
PID=""
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case $1 in
|
||||
-h|"-?")
|
||||
usage
|
||||
;;
|
||||
start|resume|stop|check|status|list|collect)
|
||||
ACTION="$1"
|
||||
;;
|
||||
jstack)
|
||||
ACTION="start"
|
||||
EVENT="jstack"
|
||||
PARAMS="$PARAMS,threads"
|
||||
;;
|
||||
-v|--version)
|
||||
ACTION="version"
|
||||
;;
|
||||
-e)
|
||||
EVENT="$(echo "$2" | sed 's/,/,event=/g')"
|
||||
shift
|
||||
;;
|
||||
-d)
|
||||
DURATION="$2"
|
||||
shift
|
||||
;;
|
||||
-f)
|
||||
FILE="$2"
|
||||
USE_TMP=false
|
||||
shift
|
||||
;;
|
||||
-i)
|
||||
PARAMS="$PARAMS,interval=$2"
|
||||
shift
|
||||
;;
|
||||
-j)
|
||||
PARAMS="$PARAMS,jstackdepth=$2"
|
||||
shift
|
||||
;;
|
||||
-t)
|
||||
PARAMS="$PARAMS,threads"
|
||||
;;
|
||||
-s)
|
||||
FORMAT="$FORMAT,simple"
|
||||
;;
|
||||
-g)
|
||||
FORMAT="$FORMAT,sig"
|
||||
;;
|
||||
-a)
|
||||
FORMAT="$FORMAT,ann"
|
||||
;;
|
||||
-o)
|
||||
OUTPUT="$2"
|
||||
shift
|
||||
;;
|
||||
-I|--include)
|
||||
FORMAT="$FORMAT,include=$2"
|
||||
shift
|
||||
;;
|
||||
-X|--exclude)
|
||||
FORMAT="$FORMAT,exclude=$2"
|
||||
shift
|
||||
;;
|
||||
--filter)
|
||||
FILTER="$(echo "$2" | sed 's/,/;/g')"
|
||||
FORMAT="$FORMAT,filter=$FILTER"
|
||||
shift
|
||||
;;
|
||||
--title)
|
||||
# escape XML special characters and comma
|
||||
TITLE="$(echo "$2" | sed 's/&/\&/g; s/</\</g; s/>/\>/g; s/,/\,/g')"
|
||||
FORMAT="$FORMAT,title=$TITLE"
|
||||
shift
|
||||
;;
|
||||
--width|--height|--minwidth)
|
||||
FORMAT="$FORMAT,${1#--}=$2"
|
||||
shift
|
||||
;;
|
||||
--reverse)
|
||||
FORMAT="$FORMAT,reverse"
|
||||
;;
|
||||
--all-kernel)
|
||||
PARAMS="$PARAMS,allkernel"
|
||||
;;
|
||||
--all-user)
|
||||
PARAMS="$PARAMS,alluser"
|
||||
;;
|
||||
--cstack|--call-graph)
|
||||
PARAMS="$PARAMS,cstack=$2"
|
||||
shift
|
||||
;;
|
||||
--begin|--end)
|
||||
PARAMS="$PARAMS,${1#--}=$2"
|
||||
shift
|
||||
;;
|
||||
--safe-mode)
|
||||
PARAMS="$PARAMS,safemode=$2"
|
||||
shift
|
||||
;;
|
||||
[0-9]*)
|
||||
PID="$1"
|
||||
;;
|
||||
jps)
|
||||
# A shortcut for getting PID of a running Java application
|
||||
# -XX:+PerfDisableSharedMem prevents jps from appearing in its own list
|
||||
PID=$(pgrep -n java || jps -q -J-XX:+PerfDisableSharedMem)
|
||||
if [ "$PID" = "" ]; then
|
||||
echo "No Java process could be found!"
|
||||
fi
|
||||
;;
|
||||
-*)
|
||||
echo "Unrecognized option: $1"
|
||||
usage
|
||||
;;
|
||||
*)
|
||||
if [ $# -eq 1 ]; then
|
||||
# the last argument is the application name as it would appear in the jps tool
|
||||
PID=$(jps -J-XX:+PerfDisableSharedMem | grep " $1$" | head -n 1 | cut -d ' ' -f 1)
|
||||
if [ "$PID" = "" ]; then
|
||||
echo "No Java process '$1' could be found!"
|
||||
fi
|
||||
else
|
||||
echo "Unrecognized option: $1"
|
||||
usage
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [ "$PID" = "" ] && [ "$ACTION" != "version" ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
# If no -f argument is given, use temporary file to transfer output to caller terminal.
|
||||
# Let the target process create the file in case this script is run by superuser.
|
||||
if [ "$USE_TMP" = true ]; then
|
||||
FILE=/tmp/async-profiler.$$.$PID
|
||||
else
|
||||
case "$FILE" in
|
||||
/*)
|
||||
# Path is absolute
|
||||
;;
|
||||
*)
|
||||
# Output file is written by the target process. Make the path absolute to avoid confusion.
|
||||
FILE=$PWD/$FILE
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
case $ACTION in
|
||||
start|resume|check)
|
||||
jattach "$ACTION,event=$EVENT,file=$FILE,$OUTPUT$FORMAT$PARAMS"
|
||||
;;
|
||||
stop)
|
||||
jattach "stop,file=$FILE,$OUTPUT$FORMAT"
|
||||
;;
|
||||
status)
|
||||
jattach "status,file=$FILE"
|
||||
;;
|
||||
list)
|
||||
jattach "list,file=$FILE"
|
||||
;;
|
||||
collect)
|
||||
jattach "start,event=$EVENT,file=$FILE,$OUTPUT$FORMAT$PARAMS"
|
||||
while [ "$DURATION" -gt 0 ]; do
|
||||
DURATION=$(( DURATION-1 ))
|
||||
check_if_terminated
|
||||
sleep 1
|
||||
done
|
||||
jattach "stop,file=$FILE,$OUTPUT$FORMAT"
|
||||
;;
|
||||
version)
|
||||
if [ "$PID" = "" ]; then
|
||||
java "-agentpath:$PROFILER=version=full" -version 2> /dev/null
|
||||
else
|
||||
jattach "version=full,file=$FILE"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
@@ -1,27 +1,38 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "allocTracer.h"
|
||||
#include "os.h"
|
||||
#include "profiler.h"
|
||||
#include "stackFrame.h"
|
||||
#include "tsc.h"
|
||||
#include "vmStructs.h"
|
||||
|
||||
|
||||
int AllocTracer::_trap_kind;
|
||||
Trap AllocTracer::_in_new_tlab(0);
|
||||
Trap AllocTracer::_outside_tlab(1);
|
||||
Trap AllocTracer::_in_new_tlab;
|
||||
Trap AllocTracer::_outside_tlab;
|
||||
|
||||
u64 AllocTracer::_interval;
|
||||
volatile u64 AllocTracer::_allocated_bytes;
|
||||
|
||||
|
||||
// Called whenever our breakpoint trap is hit
|
||||
void AllocTracer::trapHandler(int signo, siginfo_t* siginfo, void* ucontext) {
|
||||
void AllocTracer::signalHandler(int signo, siginfo_t* siginfo, void* ucontext) {
|
||||
StackFrame frame(ucontext);
|
||||
EventType event_type;
|
||||
int event_type;
|
||||
uintptr_t total_size;
|
||||
uintptr_t instance_size;
|
||||
|
||||
@@ -29,18 +40,17 @@ void AllocTracer::trapHandler(int signo, siginfo_t* siginfo, void* ucontext) {
|
||||
if (_in_new_tlab.covers(frame.pc())) {
|
||||
// send_allocation_in_new_tlab(Klass* klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread)
|
||||
// send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size)
|
||||
event_type = ALLOC_SAMPLE;
|
||||
event_type = BCI_ALLOC;
|
||||
total_size = _trap_kind == 1 ? frame.arg2() : frame.arg1();
|
||||
instance_size = _trap_kind == 1 ? frame.arg3() : frame.arg2();
|
||||
} else if (_outside_tlab.covers(frame.pc())) {
|
||||
// send_allocation_outside_tlab(Klass* klass, HeapWord* obj, size_t alloc_size, Thread* thread)
|
||||
// send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
|
||||
event_type = ALLOC_OUTSIDE_TLAB;
|
||||
event_type = BCI_ALLOC_OUTSIDE_TLAB;
|
||||
total_size = _trap_kind == 1 ? frame.arg2() : frame.arg1();
|
||||
instance_size = 0;
|
||||
} else {
|
||||
// Not our trap
|
||||
Profiler::instance()->trapHandler(signo, siginfo, ucontext);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -48,37 +58,50 @@ void AllocTracer::trapHandler(int signo, siginfo_t* siginfo, void* ucontext) {
|
||||
uintptr_t klass = frame.arg0();
|
||||
frame.ret();
|
||||
|
||||
if (_enabled && updateCounter(_allocated_bytes, total_size, _interval)) {
|
||||
if (_enabled) {
|
||||
// TODO: _enabled also uses traps
|
||||
recordAllocation(ucontext, event_type, klass, total_size, instance_size);
|
||||
}
|
||||
}
|
||||
|
||||
void AllocTracer::recordAllocation(void* ucontext, EventType event_type, uintptr_t rklass,
|
||||
void AllocTracer::recordAllocation(void* ucontext, int event_type, uintptr_t rklass,
|
||||
uintptr_t total_size, uintptr_t instance_size) {
|
||||
if (_interval) {
|
||||
// Do not record allocation unless allocated at least _interval bytes
|
||||
while (true) {
|
||||
u64 prev = _allocated_bytes;
|
||||
u64 next = prev + total_size;
|
||||
if (next < _interval) {
|
||||
if (__sync_bool_compare_and_swap(&_allocated_bytes, prev, next)) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (__sync_bool_compare_and_swap(&_allocated_bytes, prev, next % _interval)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AllocEvent event;
|
||||
event._start_time = TSC::ticks();
|
||||
event._class_id = 0;
|
||||
event._total_size = total_size;
|
||||
event._instance_size = instance_size;
|
||||
|
||||
if (VMStructs::hasClassNames()) {
|
||||
VMSymbol* symbol = VMKlass::fromHandle(rklass)->name();
|
||||
event._class_id = Profiler::instance()->classMap()->lookup(symbol->body(), symbol->length());
|
||||
event._class_id = Profiler::_instance.classMap()->lookup(symbol->body(), symbol->length());
|
||||
}
|
||||
|
||||
Profiler::instance()->recordSample(ucontext, total_size, event_type, &event);
|
||||
Profiler::_instance.recordSample(ucontext, total_size, event_type, &event);
|
||||
}
|
||||
|
||||
Error AllocTracer::check(Arguments& args) {
|
||||
if (args._live) {
|
||||
return Error("'live' option is supported on OpenJDK 11+");
|
||||
}
|
||||
|
||||
if (_in_new_tlab.entry() != 0 && _outside_tlab.entry() != 0) {
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
CodeCache* libjvm = VMStructs::libjvm();
|
||||
NativeCodeCache* libjvm = VMStructs::libjvm();
|
||||
const void* ne;
|
||||
const void* oe;
|
||||
|
||||
@@ -95,9 +118,9 @@ Error AllocTracer::check(Arguments& args) {
|
||||
return Error("No AllocTracer symbols found. Are JDK debug symbols installed?");
|
||||
}
|
||||
|
||||
_in_new_tlab.assign(ne);
|
||||
_outside_tlab.assign(oe);
|
||||
_in_new_tlab.pair(_outside_tlab);
|
||||
if (!_in_new_tlab.assign(ne) || !_outside_tlab.assign(oe)) {
|
||||
return Error("Unable to install allocation trap");
|
||||
}
|
||||
|
||||
return Error::OK;
|
||||
}
|
||||
@@ -108,12 +131,13 @@ Error AllocTracer::start(Arguments& args) {
|
||||
return error;
|
||||
}
|
||||
|
||||
_interval = args._alloc > 0 ? args._alloc : 0;
|
||||
_interval = args._interval;
|
||||
_allocated_bytes = 0;
|
||||
|
||||
if (!_in_new_tlab.install() || !_outside_tlab.install()) {
|
||||
return Error("Cannot install allocation breakpoints");
|
||||
}
|
||||
OS::installSignalHandler(SIGTRAP, signalHandler);
|
||||
|
||||
_in_new_tlab.install();
|
||||
_outside_tlab.install();
|
||||
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _ALLOCTRACER_H
|
||||
@@ -9,7 +20,6 @@
|
||||
#include <signal.h>
|
||||
#include <stdint.h>
|
||||
#include "engine.h"
|
||||
#include "event.h"
|
||||
#include "trap.h"
|
||||
|
||||
|
||||
@@ -22,27 +32,27 @@ class AllocTracer : public Engine {
|
||||
static u64 _interval;
|
||||
static volatile u64 _allocated_bytes;
|
||||
|
||||
static void recordAllocation(void* ucontext, EventType event_type, uintptr_t rklass,
|
||||
static void signalHandler(int signo, siginfo_t* siginfo, void* ucontext);
|
||||
|
||||
static void recordAllocation(void* ucontext, int event_type, uintptr_t rklass,
|
||||
uintptr_t total_size, uintptr_t instance_size);
|
||||
|
||||
public:
|
||||
const char* type() {
|
||||
return "alloc_tracer";
|
||||
}
|
||||
|
||||
const char* title() {
|
||||
return "Allocation profile";
|
||||
const char* name() {
|
||||
return "alloc";
|
||||
}
|
||||
|
||||
const char* units() {
|
||||
return "bytes";
|
||||
}
|
||||
|
||||
CStack cstack() {
|
||||
return CSTACK_NO;
|
||||
}
|
||||
|
||||
Error check(Arguments& args);
|
||||
Error start(Arguments& args);
|
||||
void stop();
|
||||
|
||||
static void trapHandler(int signo, siginfo_t* siginfo, void* ucontext);
|
||||
};
|
||||
|
||||
#endif // _ALLOCTRACER_H
|
||||
|
||||
@@ -1,14 +1,22 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2018 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.profiler;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
/**
|
||||
* Java API for in-process profiling. Serves as a wrapper around
|
||||
@@ -31,93 +39,25 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
return instance;
|
||||
}
|
||||
|
||||
AsyncProfiler profiler = new AsyncProfiler();
|
||||
if (libPath != null) {
|
||||
System.load(libPath);
|
||||
if (libPath == null) {
|
||||
System.loadLibrary("asyncProfiler");
|
||||
} else {
|
||||
try {
|
||||
// No need to load library, if it has been preloaded with -agentpath
|
||||
profiler.getVersion();
|
||||
} catch (UnsatisfiedLinkError e) {
|
||||
File file = extractEmbeddedLib();
|
||||
if (file != null) {
|
||||
try {
|
||||
System.load(file.getPath());
|
||||
} finally {
|
||||
file.delete();
|
||||
}
|
||||
} else {
|
||||
System.loadLibrary("asyncProfiler");
|
||||
}
|
||||
}
|
||||
System.load(libPath);
|
||||
}
|
||||
|
||||
instance = profiler;
|
||||
return profiler;
|
||||
}
|
||||
|
||||
private static File extractEmbeddedLib() {
|
||||
String resourceName = "/" + getPlatformTag() + "/libasyncProfiler.so";
|
||||
InputStream in = AsyncProfiler.class.getResourceAsStream(resourceName);
|
||||
if (in == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
String extractPath = System.getProperty("one.profiler.extractPath");
|
||||
File file = File.createTempFile("libasyncProfiler-", ".so",
|
||||
extractPath == null || extractPath.isEmpty() ? null : new File(extractPath));
|
||||
try (FileOutputStream out = new FileOutputStream(file)) {
|
||||
byte[] buf = new byte[32000];
|
||||
for (int bytes; (bytes = in.read(buf)) >= 0; ) {
|
||||
out.write(buf, 0, bytes);
|
||||
}
|
||||
}
|
||||
return file;
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
} finally {
|
||||
try {
|
||||
in.close();
|
||||
} catch (IOException e) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String getPlatformTag() {
|
||||
String os = System.getProperty("os.name").toLowerCase();
|
||||
String arch = System.getProperty("os.arch").toLowerCase();
|
||||
if (os.contains("linux")) {
|
||||
if (arch.equals("amd64") || arch.equals("x86_64") || arch.contains("x64")) {
|
||||
return "linux-x64";
|
||||
} else if (arch.equals("aarch64") || arch.contains("arm64")) {
|
||||
return "linux-arm64";
|
||||
} else if (arch.equals("aarch32") || arch.contains("arm")) {
|
||||
return "linux-arm32";
|
||||
} else if (arch.contains("86")) {
|
||||
return "linux-x86";
|
||||
} else if (arch.contains("ppc64")) {
|
||||
return "linux-ppc64le";
|
||||
}
|
||||
} else if (os.contains("mac")) {
|
||||
return "macos";
|
||||
}
|
||||
throw new UnsupportedOperationException("Unsupported platform: " + os + "-" + arch);
|
||||
instance = new AsyncProfiler();
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start profiling
|
||||
*
|
||||
* @param event Profiling event, see {@link Events}
|
||||
* @param event Profiling event, see {@link Events}
|
||||
* @param interval Sampling interval, e.g. nanoseconds for Events.CPU
|
||||
* @throws IllegalStateException If profiler is already running
|
||||
*/
|
||||
@Override
|
||||
public void start(String event, long interval) throws IllegalStateException {
|
||||
if (event == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
start0(event, interval, true);
|
||||
}
|
||||
|
||||
@@ -125,15 +65,12 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
* Start or resume profiling without resetting collected data.
|
||||
* Note that event and interval may change since the previous profiling session.
|
||||
*
|
||||
* @param event Profiling event, see {@link Events}
|
||||
* @param event Profiling event, see {@link Events}
|
||||
* @param interval Sampling interval, e.g. nanoseconds for Events.CPU
|
||||
* @throws IllegalStateException If profiler is already running
|
||||
*/
|
||||
@Override
|
||||
public void resume(String event, long interval) throws IllegalStateException {
|
||||
if (event == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
start0(event, interval, false);
|
||||
}
|
||||
|
||||
@@ -176,13 +113,10 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
* @param command Profiling command
|
||||
* @return The command result
|
||||
* @throws IllegalArgumentException If failed to parse the command
|
||||
* @throws IOException If failed to create output file
|
||||
* @throws IOException If failed to create output file
|
||||
*/
|
||||
@Override
|
||||
public String execute(String command) throws IllegalArgumentException, IllegalStateException, IOException {
|
||||
if (command == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
public String execute(String command) throws IllegalArgumentException, IOException {
|
||||
return execute0(command);
|
||||
}
|
||||
|
||||
@@ -195,22 +129,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
@Override
|
||||
public String dumpCollapsed(Counter counter) {
|
||||
try {
|
||||
return execute0("collapsed," + counter.name().toLowerCase());
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump collected stack traces
|
||||
*
|
||||
* @param maxTraces Maximum number of stack traces to dump. 0 means no limit
|
||||
* @return Textual representation of the profile
|
||||
*/
|
||||
@Override
|
||||
public String dumpTraces(int maxTraces) {
|
||||
try {
|
||||
return execute0(maxTraces == 0 ? "traces" : "traces=" + maxTraces);
|
||||
return execute0("collapsed,counter=" + counter.name().toLowerCase());
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
@@ -225,7 +144,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
@Override
|
||||
public String dumpFlat(int maxMethods) {
|
||||
try {
|
||||
return execute0(maxMethods == 0 ? "flat" : "flat=" + maxMethods);
|
||||
return execute0("flat=" + maxMethods);
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
@@ -252,7 +171,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
}
|
||||
|
||||
private void filterThread(Thread thread, boolean enable) {
|
||||
if (thread == null || thread == Thread.currentThread()) {
|
||||
if (thread == null) {
|
||||
filterThread0(null, enable);
|
||||
} else {
|
||||
// Need to take lock to avoid race condition with a thread state change
|
||||
@@ -266,10 +185,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
}
|
||||
|
||||
private native void start0(String event, long interval, boolean reset) throws IllegalStateException;
|
||||
|
||||
private native void stop0() throws IllegalStateException;
|
||||
|
||||
private native String execute0(String command) throws IllegalArgumentException, IllegalStateException, IOException;
|
||||
|
||||
private native String execute0(String command) throws IllegalArgumentException, IOException;
|
||||
private native void filterThread0(Thread thread, boolean enable);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2018 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.profiler;
|
||||
@@ -24,9 +35,8 @@ public interface AsyncProfilerMXBean {
|
||||
long getSamples();
|
||||
String getVersion();
|
||||
|
||||
String execute(String command) throws IllegalArgumentException, IllegalStateException, java.io.IOException;
|
||||
String execute(String command) throws IllegalArgumentException, java.io.IOException;
|
||||
|
||||
String dumpCollapsed(Counter counter);
|
||||
String dumpTraces(int maxTraces);
|
||||
String dumpFlat(int maxMethods);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2018 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.profiler;
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2018 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.profiler;
|
||||
@@ -13,6 +24,5 @@ public class Events {
|
||||
public static final String ALLOC = "alloc";
|
||||
public static final String LOCK = "lock";
|
||||
public static final String WALL = "wall";
|
||||
public static final String CTIMER = "ctimer";
|
||||
public static final String ITIMER = "itimer";
|
||||
}
|
||||
|
||||
152
src/arch.h
@@ -1,29 +1,23 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _ARCH_H
|
||||
#define _ARCH_H
|
||||
|
||||
|
||||
#ifndef likely
|
||||
# define likely(x) (__builtin_expect(!!(x), 1))
|
||||
#endif
|
||||
|
||||
#ifndef unlikely
|
||||
# define unlikely(x) (__builtin_expect(!!(x), 0))
|
||||
#endif
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
|
||||
#ifdef _LP64
|
||||
# define LP64_ONLY(code) code
|
||||
#else // !_LP64
|
||||
# define LP64_ONLY(code)
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
typedef unsigned char u8;
|
||||
typedef unsigned short u16;
|
||||
typedef unsigned int u32;
|
||||
@@ -33,53 +27,32 @@ static inline u64 atomicInc(volatile u64& var, u64 increment = 1) {
|
||||
return __sync_fetch_and_add(&var, increment);
|
||||
}
|
||||
|
||||
static inline int atomicInc(volatile u32& var, int increment = 1) {
|
||||
return __sync_fetch_and_add(&var, increment);
|
||||
}
|
||||
|
||||
static inline int atomicInc(volatile int& var, int increment = 1) {
|
||||
return __sync_fetch_and_add(&var, increment);
|
||||
}
|
||||
|
||||
static inline u64 loadAcquire(u64& var) {
|
||||
return __atomic_load_n(&var, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline void storeRelease(u64& var, u64 value) {
|
||||
return __atomic_store_n(&var, value, __ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
|
||||
typedef unsigned char instruction_t;
|
||||
const instruction_t BREAKPOINT = 0xcc;
|
||||
const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = 2;
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 4;
|
||||
const int PLT_HEADER_SIZE = 16;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 8; // PERF_REG_X86_IP
|
||||
|
||||
#define spinPause() asm volatile("pause")
|
||||
#define rmb() asm volatile("lfence" : : : "memory")
|
||||
#define flushCache(addr) asm volatile("mfence; clflush (%0); mfence" : : "r" (addr) : "memory")
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() ((void**)__builtin_frame_address(0) + 2)
|
||||
#define flushCache(addr) asm volatile("mfence; clflush (%0); mfence" : : "r"(addr) : "memory")
|
||||
|
||||
#elif defined(__arm__) || defined(__thumb__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
const instruction_t BREAKPOINT = 0xe7f001f0;
|
||||
const instruction_t BREAKPOINT_THUMB = 0xde01de01;
|
||||
const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 20;
|
||||
const int PLT_ENTRY_SIZE = 12;
|
||||
const int PERF_REG_PC = 15; // PERF_REG_ARM_PC
|
||||
@@ -88,95 +61,20 @@ const int PERF_REG_PC = 15; // PERF_REG_ARM_PC
|
||||
#define rmb() asm volatile("dmb ish" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(1)
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
const instruction_t BREAKPOINT = 0xd4200000;
|
||||
const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 32;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 32; // PERF_REG_ARM64_PC
|
||||
|
||||
#define spinPause() asm volatile("isb")
|
||||
#define spinPause() asm volatile("yield")
|
||||
#define rmb() asm volatile("dmb ish" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(1)
|
||||
|
||||
#elif defined(__PPC64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
const instruction_t BREAKPOINT = 0x7fe00008;
|
||||
// We place the break point in the third instruction slot on PPCLE as the first two are skipped if
|
||||
// the call comes from within the same compilation unit according to the LE ABI.
|
||||
const int BREAKPOINT_OFFSET = 8;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 2;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 24;
|
||||
const int PLT_ENTRY_SIZE = 24;
|
||||
const int PERF_REG_PC = 32; // PERF_REG_POWERPC_NIP
|
||||
|
||||
#define spinPause() asm volatile("yield") // does nothing, but using or 1,1,1 would lead to other problems
|
||||
#define rmb() asm volatile ("sync" : : : "memory") // lwsync would do but better safe than sorry
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#elif defined(__riscv) && (__riscv_xlen == 64)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
#if defined(__riscv_compressed)
|
||||
const instruction_t BREAKPOINT = 0x9002; // EBREAK (compressed form)
|
||||
#else
|
||||
const instruction_t BREAKPOINT = 0x00100073; // EBREAK
|
||||
#endif
|
||||
const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1; // return address is at -1 from FP
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 24; // Best guess from examining readelf
|
||||
const int PLT_ENTRY_SIZE = 24; // ...same...
|
||||
const int PERF_REG_PC = 0; // PERF_REG_RISCV_PC
|
||||
|
||||
#define spinPause() // No architecture support
|
||||
#define rmb() asm volatile ("fence" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#elif defined(__loongarch_lp64)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
const instruction_t BREAKPOINT = 0x002a0005; // EBREAK
|
||||
const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 32;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 0; // PERF_REG_LOONGARCH_PC
|
||||
|
||||
#define spinPause() asm volatile("ibar 0x0")
|
||||
#define rmb() asm volatile("dbar 0x0" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#else
|
||||
|
||||
#error "Compiling on unsupported arch"
|
||||
@@ -184,24 +82,4 @@ const int PERF_REG_PC = 0; // PERF_REG_LOONGARCH_PC
|
||||
#endif
|
||||
|
||||
|
||||
// On Apple M1 and later processors, memory is either writable or executable (W^X)
|
||||
#if defined(__aarch64__) && defined(__APPLE__)
|
||||
# define WX_MEMORY true
|
||||
#else
|
||||
# define WX_MEMORY false
|
||||
#endif
|
||||
|
||||
// Pointer authentication (PAC) support.
|
||||
// Only 48-bit virtual addresses are currently supported.
|
||||
#ifdef __aarch64__
|
||||
const unsigned long PAC_MASK = WX_MEMORY ? 0x7fffffffffffUL : 0xffffffffffffUL;
|
||||
|
||||
static inline const void* stripPointer(const void* p) {
|
||||
return (const void*) ((unsigned long)p & PAC_MASK);
|
||||
}
|
||||
#else
|
||||
# define stripPointer(p) (p)
|
||||
#endif
|
||||
|
||||
|
||||
#endif // _ARCH_H
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <limits.h>
|
||||
@@ -11,107 +22,68 @@
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "arguments.h"
|
||||
#include "os.h"
|
||||
|
||||
|
||||
// Arguments of the last start/resume command; reused for shutdown and restart
|
||||
Arguments _global_args;
|
||||
|
||||
// Predefined value that denotes successful operation
|
||||
const Error Error::OK(NULL);
|
||||
|
||||
// Extra buffer space for expanding file pattern
|
||||
const size_t EXTRA_BUF_SIZE = 512;
|
||||
|
||||
static const Multiplier NANOS[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {0, 0}};
|
||||
static const Multiplier BYTES[] = {{'b', 1}, {'k', 1024}, {'m', 1048576}, {'g', 1073741824}, {0, 0}};
|
||||
static const Multiplier SECONDS[] = {{'s', 1}, {'m', 60}, {'h', 3600}, {'d', 86400}, {0, 0}};
|
||||
static const Multiplier UNIVERSAL[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {'b', 1}, {'k', 1024}, {'g', 1073741824}, {0, 0}};
|
||||
|
||||
|
||||
// Statically compute hash code of a string containing up to 12 [a-z] letters
|
||||
#define HASH(s) ((s[0] & 31LL) | (s[1] & 31LL) << 5 | (s[2] & 31LL) << 10 | (s[3] & 31LL) << 15 | \
|
||||
(s[4] & 31LL) << 20 | (s[5] & 31LL) << 25 | (s[6] & 31LL) << 30 | (s[7] & 31LL) << 35 | \
|
||||
(s[8] & 31LL) << 40 | (s[9] & 31LL) << 45 | (s[10] & 31LL) << 50 | (s[11] & 31LL) << 55)
|
||||
#define HASH(s) (HASH12(s " "))
|
||||
|
||||
#define HASH12(s) (s[0] & 31LL) | (s[1] & 31LL) << 5 | (s[2] & 31LL) << 10 | (s[3] & 31LL) << 15 | \
|
||||
(s[4] & 31LL) << 20 | (s[5] & 31LL) << 25 | (s[6] & 31LL) << 30 | (s[7] & 31LL) << 35 | \
|
||||
(s[8] & 31LL) << 40 | (s[9] & 31LL) << 45 | (s[10] & 31LL) << 50 | (s[11] & 31LL) << 55
|
||||
|
||||
// Simulate switch statement over string hashes
|
||||
#define SWITCH(arg) long long arg_hash = hash(arg); if (0)
|
||||
|
||||
#define CASE(s) } else if (arg_hash == HASH(s " ")) {
|
||||
#define CASE(s) } else if (arg_hash == HASH(s)) {
|
||||
|
||||
#define DEFAULT() } else {
|
||||
#define CASE2(s1, s2) } else if (arg_hash == HASH(s1) || arg_hash == HASH(s2)) {
|
||||
|
||||
|
||||
// Parses agent arguments.
|
||||
// The format of the string is:
|
||||
// arg[,arg...]
|
||||
// where arg is one of the following options:
|
||||
// start - start profiling
|
||||
// resume - start or resume profiling without resetting collected data
|
||||
// stop - stop profiling
|
||||
// dump - dump collected data without stopping profiling session
|
||||
// check - check if the specified profiling event is available
|
||||
// status - print profiling status (inactive / running for X seconds)
|
||||
// meminfo - print profiler memory stats
|
||||
// list - show the list of available profiling events
|
||||
// version - display the agent version
|
||||
// event=EVENT - which event to trace (cpu, wall, cache-misses, etc.)
|
||||
// alloc[=BYTES] - profile allocations with BYTES interval
|
||||
// live - build allocation profile from live objects only
|
||||
// lock[=DURATION] - profile contended locks overflowing the DURATION ns bucket (default: 10us)
|
||||
// wall[=NS] - run wall clock profiling together with CPU profiling
|
||||
// nobatch - legacy wall clock sampling without batch events
|
||||
// collapsed - dump collapsed stacks (the format used by FlameGraph script)
|
||||
// flamegraph - produce Flame Graph in HTML format
|
||||
// tree - produce call tree in HTML format
|
||||
// jfr - dump events in Java Flight Recorder format
|
||||
// jfropts=OPTIONS - JFR recording options: numeric bitmask or 'mem'
|
||||
// jfrsync[=CONFIG] - start Java Flight Recording with the given config along with the profiler
|
||||
// traces[=N] - dump top N call traces
|
||||
// flat[=N] - dump top N methods (aka flat profile)
|
||||
// samples - count the number of samples (default)
|
||||
// total - count the total value (time, bytes, etc.) instead of samples
|
||||
// chunksize=N - approximate size of JFR chunk in bytes (default: 100 MB)
|
||||
// chunktime=N - duration of JFR chunk in seconds (default: 1 hour)
|
||||
// timeout=TIME - automatically stop profiler at TIME (absolute or relative)
|
||||
// loop=TIME - run profiler in a loop (continuous profiling)
|
||||
// interval=N - sampling interval in ns (default: 10'000'000, i.e. 10 ms)
|
||||
// jstackdepth=N - maximum Java stack depth (default: 2048)
|
||||
// signal=N - use alternative signal for cpu or wall clock profiling
|
||||
// features=LIST - advanced stack trace features (vtable, comptask, pcaddr)"
|
||||
// safemode=BITS - disable stack recovery techniques (default: 0, i.e. everything enabled)
|
||||
// file=FILENAME - output file name for dumping
|
||||
// log=FILENAME - log warnings and errors to the given dedicated stream
|
||||
// loglevel=LEVEL - logging level: TRACE, DEBUG, INFO, WARN, ERROR, or NONE
|
||||
// quiet - do not log "Profiling started/stopped" message
|
||||
// server=ADDRESS - start insecure HTTP server at ADDRESS/PORT
|
||||
// filter=FILTER - thread filter
|
||||
// threads - profile different threads separately
|
||||
// sched - group threads by scheduling policy
|
||||
// cstack=MODE - how to collect C stack frames in addition to Java stack
|
||||
// MODE is 'fp', 'dwarf', 'lbr', 'vm' or 'no'
|
||||
// clock=SOURCE - clock source for JFR timestamps: 'tsc' or 'monotonic'
|
||||
// alluser - include only user-mode events
|
||||
// fdtransfer - use fdtransfer to pass fds to the profiler
|
||||
// target-cpu=CPU - sample threads on a specific CPU (perf_events only, default: -1)
|
||||
// record-cpu - record which cpu a sample was taken on
|
||||
// simple - simple class names instead of FQN
|
||||
// dot - dotted class names
|
||||
// norm - normalize names of hidden classes / lambdas
|
||||
// sig - print method signatures
|
||||
// ann - annotate Java methods
|
||||
// lib - prepend library names
|
||||
// mcache - max age of jmethodID cache (default: 0 = disabled)
|
||||
// include=PATTERN - include stack traces containing PATTERN
|
||||
// exclude=PATTERN - exclude stack traces containing PATTERN
|
||||
// begin=FUNCTION - begin profiling when FUNCTION is executed
|
||||
// end=FUNCTION - end profiling when FUNCTION is executed
|
||||
// nostop - do not stop profiling outside --begin/--end window
|
||||
// title=TITLE - FlameGraph title
|
||||
// minwidth=PCT - FlameGraph minimum frame width in percent
|
||||
// reverse - generate stack-reversed FlameGraph / Call tree (defaults to icicle graph)
|
||||
// inverted - toggles the layout for reversed stacktraces from icicle to flamegraph
|
||||
// and for default stacktraces from flamegraph to icicle
|
||||
// start - start profiling
|
||||
// resume - start or resume profiling without resetting collected data
|
||||
// stop - stop profiling
|
||||
// check - check if the specified profiling event is available
|
||||
// status - print profiling status (inactive / running for X seconds)
|
||||
// list - show the list of available profiling events
|
||||
// version[=full] - display the agent version
|
||||
// event=EVENT - which event to trace (cpu, alloc, lock, cache-misses etc.)
|
||||
// collapsed[=C] - dump collapsed stacks (the format used by FlameGraph script)
|
||||
// html[=C] - produce Flame Graph in HTML format
|
||||
// tree[=C] - produce call tree in HTML format
|
||||
// C is counter type: 'samples' or 'total'
|
||||
// jfr - dump events in Java Flight Recorder format
|
||||
// flat[=N] - dump top N methods (aka flat profile)
|
||||
// interval=N - sampling interval in ns (default: 10'000'000, i.e. 10 ms)
|
||||
// jstackdepth=N - maximum Java stack depth (default: 2048)
|
||||
// safemode=BITS - disable stack recovery techniques (default: 0, i.e. everything enabled)
|
||||
// file=FILENAME - output file name for dumping
|
||||
// filter=FILTER - thread filter
|
||||
// threads - profile different threads separately
|
||||
// cstack=MODE - how to collect C stack frames in addition to Java stack
|
||||
// MODE is 'fp' (Frame Pointer), 'lbr' (Last Branch Record) or 'no'
|
||||
// allkernel - include only kernel-mode events
|
||||
// alluser - include only user-mode events
|
||||
// simple - simple class names instead of FQN
|
||||
// dot - dotted class names
|
||||
// sig - print method signatures
|
||||
// ann - annotate Java method names
|
||||
// include=PATTERN - include stack traces containing PATTERN
|
||||
// exclude=PATTERN - exclude stack traces containing PATTERN
|
||||
// begin=FUNCTION - begin profiling when FUNCTION is executed
|
||||
// end=FUNCTION - end profiling when FUNCTION is executed
|
||||
// title=TITLE - FlameGraph title
|
||||
// minwidth=PCT - FlameGraph minimum frame width in percent
|
||||
// reverse - generate stack-reversed FlameGraph / Call tree
|
||||
//
|
||||
// It is possible to specify multiple dump options at the same time
|
||||
|
||||
@@ -122,15 +94,13 @@ Error Arguments::parse(const char* args) {
|
||||
|
||||
size_t len = strlen(args);
|
||||
free(_buf);
|
||||
_buf = (char*)malloc(len + EXTRA_BUF_SIZE + 1);
|
||||
_buf = (char*)malloc(len + EXTRA_BUF_SIZE);
|
||||
if (_buf == NULL) {
|
||||
return Error("Not enough memory to parse arguments");
|
||||
}
|
||||
char* args_copy = strcpy(_buf + EXTRA_BUF_SIZE, args);
|
||||
strcpy(_buf, args);
|
||||
|
||||
const char* msg = NULL;
|
||||
|
||||
for (char* arg = strtok(args_copy, ","); arg != NULL; arg = strtok(NULL, ",")) {
|
||||
for (char* arg = strtok(_buf, ","); arg != NULL; arg = strtok(NULL, ",")) {
|
||||
char* value = strchr(arg, '=');
|
||||
if (value != NULL) *value++ = 0;
|
||||
|
||||
@@ -145,274 +115,97 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("stop")
|
||||
_action = ACTION_STOP;
|
||||
|
||||
CASE("dump")
|
||||
_action = ACTION_DUMP;
|
||||
|
||||
CASE("check")
|
||||
_action = ACTION_CHECK;
|
||||
|
||||
CASE("status")
|
||||
_action = ACTION_STATUS;
|
||||
|
||||
CASE("meminfo")
|
||||
_action = ACTION_MEMINFO;
|
||||
|
||||
CASE("list")
|
||||
_action = ACTION_LIST;
|
||||
|
||||
CASE("version")
|
||||
_action = ACTION_VERSION;
|
||||
_action = value == NULL ? ACTION_VERSION : ACTION_FULL_VERSION;
|
||||
|
||||
// Output formats
|
||||
CASE("collapsed")
|
||||
CASE2("collapsed", "folded")
|
||||
_output = OUTPUT_COLLAPSED;
|
||||
_counter = value == NULL || strcmp(value, "samples") == 0 ? COUNTER_SAMPLES : COUNTER_TOTAL;
|
||||
|
||||
CASE("flamegraph")
|
||||
CASE2("flamegraph", "html")
|
||||
_output = OUTPUT_FLAMEGRAPH;
|
||||
_counter = value == NULL || strcmp(value, "samples") == 0 ? COUNTER_SAMPLES : COUNTER_TOTAL;
|
||||
|
||||
CASE("tree")
|
||||
_output = OUTPUT_TREE;
|
||||
_counter = value == NULL || strcmp(value, "samples") == 0 ? COUNTER_SAMPLES : COUNTER_TOTAL;
|
||||
|
||||
CASE("jfr")
|
||||
_output = OUTPUT_JFR;
|
||||
|
||||
CASE("jfropts")
|
||||
_output = OUTPUT_JFR;
|
||||
if (value == NULL) {
|
||||
msg = "Invalid jfropts";
|
||||
} else if (value[0] >= '0' && value[0] <= '9') {
|
||||
_jfr_options = (int)strtol(value, NULL, 0);
|
||||
} else if (strstr(value, "mem")) {
|
||||
_jfr_options |= IN_MEMORY;
|
||||
}
|
||||
|
||||
CASE("jfrsync")
|
||||
_output = OUTPUT_JFR;
|
||||
_jfr_options |= JFR_SYNC_OPTS;
|
||||
_jfr_sync = value == NULL ? "default" : value;
|
||||
|
||||
CASE("traces")
|
||||
_output = OUTPUT_TEXT;
|
||||
_dump_traces = value == NULL ? INT_MAX : atoi(value);
|
||||
|
||||
CASE("flat")
|
||||
_output = OUTPUT_TEXT;
|
||||
_output = OUTPUT_FLAT;
|
||||
_dump_flat = value == NULL ? INT_MAX : atoi(value);
|
||||
|
||||
CASE("samples")
|
||||
_counter = COUNTER_SAMPLES;
|
||||
|
||||
CASE("total")
|
||||
_counter = COUNTER_TOTAL;
|
||||
|
||||
CASE("chunksize")
|
||||
if (value == NULL || (_chunk_size = parseUnits(value, BYTES)) < 0) {
|
||||
msg = "Invalid chunksize";
|
||||
}
|
||||
|
||||
CASE("chunktime")
|
||||
if (value == NULL || (_chunk_time = parseUnits(value, SECONDS)) < 0) {
|
||||
msg = "Invalid chunktime";
|
||||
}
|
||||
|
||||
// Basic options
|
||||
CASE("event")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "event must not be empty";
|
||||
} else if (strcmp(value, EVENT_ALLOC) == 0) {
|
||||
if (_alloc < 0) _alloc = 0;
|
||||
} else if (strcmp(value, EVENT_NATIVEMEM) == 0) {
|
||||
if (_nativemem < 0) _nativemem = 0;
|
||||
} else if (strcmp(value, EVENT_LOCK) == 0) {
|
||||
if (_lock < 0) _lock = DEFAULT_LOCK_INTERVAL;
|
||||
} else if (_event != NULL && !_all) {
|
||||
msg = "Duplicate event argument";
|
||||
} else {
|
||||
_event = value;
|
||||
return Error("event must not be empty");
|
||||
}
|
||||
|
||||
CASE("timeout")
|
||||
if (value == NULL || (_timeout = parseTimeout(value)) == -1) {
|
||||
msg = "Invalid timeout";
|
||||
}
|
||||
|
||||
CASE("loop")
|
||||
_loop = true;
|
||||
if (value == NULL || (_timeout = parseTimeout(value)) == -1) {
|
||||
msg = "Invalid loop duration";
|
||||
}
|
||||
|
||||
CASE("alloc")
|
||||
_alloc = value == NULL ? 0 : parseUnits(value, BYTES);
|
||||
|
||||
CASE("nativemem")
|
||||
_nativemem = value == NULL ? 0 : parseUnits(value, BYTES);
|
||||
|
||||
CASE("nofree")
|
||||
_nofree = true;
|
||||
|
||||
CASE("lock")
|
||||
_lock = value == NULL ? DEFAULT_LOCK_INTERVAL : parseUnits(value, NANOS);
|
||||
|
||||
CASE("wall")
|
||||
_wall = value == NULL ? 0 : parseUnits(value, NANOS);
|
||||
|
||||
CASE("cpu")
|
||||
if (_event != NULL) {
|
||||
msg = "Duplicate event argument";
|
||||
} else {
|
||||
_event = EVENT_CPU;
|
||||
}
|
||||
|
||||
CASE("all")
|
||||
_all = true;
|
||||
_live = true;
|
||||
if (_wall < 0) {
|
||||
_wall = 0;
|
||||
}
|
||||
if (_alloc < 0) {
|
||||
_alloc = 0;
|
||||
}
|
||||
if (_lock < 0) {
|
||||
_lock = DEFAULT_LOCK_INTERVAL;
|
||||
}
|
||||
if (_nativemem < 0) {
|
||||
_nativemem = DEFAULT_ALLOC_INTERVAL;
|
||||
}
|
||||
if (_event == NULL && OS::isLinux()) {
|
||||
_event = EVENT_CPU;
|
||||
if (!addEvent(value)) {
|
||||
return Error("multiple incompatible events");
|
||||
}
|
||||
|
||||
CASE("interval")
|
||||
if (value == NULL || (_interval = parseUnits(value, UNIVERSAL)) <= 0) {
|
||||
msg = "Invalid interval";
|
||||
if (value == NULL || (_interval = parseUnits(value)) <= 0) {
|
||||
return Error("Invalid interval");
|
||||
}
|
||||
|
||||
CASE("jstackdepth")
|
||||
if (value == NULL || (_jstackdepth = atoi(value)) <= 0) {
|
||||
msg = "jstackdepth must be > 0";
|
||||
return Error("jstackdepth must be > 0");
|
||||
}
|
||||
|
||||
CASE("signal")
|
||||
if (value == NULL || (_signal = atoi(value)) <= 0) {
|
||||
msg = "signal must be > 0";
|
||||
} else if ((value = strchr(value, '/')) != NULL) {
|
||||
// Two signals were specified: one for CPU profiling, another for wall clock
|
||||
_signal |= atoi(value + 1) << 8;
|
||||
}
|
||||
|
||||
CASE("features")
|
||||
if (value != NULL) {
|
||||
if (strstr(value, "stats")) _features.stats = 1;
|
||||
if (strstr(value, "probesp")) _features.probe_sp = 1;
|
||||
if (strstr(value, "vtable")) _features.vtable_target = 1;
|
||||
if (strstr(value, "comptask")) _features.comp_task = 1;
|
||||
if (strstr(value, "pcaddr")) _features.pc_addr = 1;
|
||||
}
|
||||
|
||||
CASE("safemode") {
|
||||
// Left for compatibility purpose; will be eventually migrated to 'features'
|
||||
int bits = value == NULL ? INT_MAX : (int)strtol(value, NULL, 0);
|
||||
_features.unknown_java = (bits & 1) ? 0 : 1;
|
||||
_features.unwind_stub = (bits & 2) ? 0 : 1;
|
||||
_features.unwind_comp = (bits & 4) ? 0 : 1;
|
||||
_features.unwind_native = (bits & 8) ? 0 : 1;
|
||||
_features.java_anchor = (bits & 16) ? 0 : 1;
|
||||
_features.gc_traces = (bits & 32) ? 0 : 1;
|
||||
}
|
||||
CASE("safemode")
|
||||
_safe_mode = value == NULL ? INT_MAX : atoi(value);
|
||||
|
||||
CASE("file")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "file must not be empty";
|
||||
return Error("file must not be empty");
|
||||
}
|
||||
_file = value;
|
||||
|
||||
CASE("log")
|
||||
_log = value == NULL || value[0] == 0 ? NULL : value;
|
||||
|
||||
CASE("loglevel")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "loglevel must not be empty";
|
||||
}
|
||||
_loglevel = value;
|
||||
|
||||
CASE("quiet")
|
||||
_quiet = true;
|
||||
|
||||
CASE("server")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "server address must not be empty";
|
||||
}
|
||||
_server = value;
|
||||
|
||||
CASE("fdtransfer")
|
||||
_fdtransfer = true;
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "fdtransfer path must not be empty";
|
||||
}
|
||||
_fdtransfer_path = value;
|
||||
|
||||
// Filters
|
||||
CASE("filter")
|
||||
_filter = value == NULL ? "" : value;
|
||||
|
||||
CASE("include")
|
||||
// Workaround -Wstringop-overflow warning
|
||||
if (value == arg + 8) appendToEmbeddedList(_include, arg + 8);
|
||||
if (value != NULL) appendToEmbeddedList(_include, value);
|
||||
|
||||
CASE("exclude")
|
||||
// Workaround -Wstringop-overflow warning
|
||||
if (value == arg + 8) appendToEmbeddedList(_exclude, arg + 8);
|
||||
if (value != NULL) appendToEmbeddedList(_exclude, value);
|
||||
|
||||
CASE("threads")
|
||||
_threads = true;
|
||||
|
||||
CASE("sched")
|
||||
_sched = true;
|
||||
|
||||
CASE("record-cpu")
|
||||
_record_cpu = true;
|
||||
|
||||
CASE("live")
|
||||
_live = true;
|
||||
|
||||
CASE("nobatch")
|
||||
_nobatch = true;
|
||||
CASE("allkernel")
|
||||
_ring = RING_KERNEL;
|
||||
|
||||
CASE("alluser")
|
||||
_alluser = true;
|
||||
_ring = RING_USER;
|
||||
|
||||
CASE("cstack")
|
||||
if (value != NULL) {
|
||||
if (strcmp(value, "fp") == 0) {
|
||||
_cstack = CSTACK_FP;
|
||||
} else if (strcmp(value, "dwarf") == 0) {
|
||||
_cstack = CSTACK_DWARF;
|
||||
} else if (strcmp(value, "lbr") == 0) {
|
||||
_cstack = CSTACK_LBR;
|
||||
} else if (strcmp(value, "vm") == 0) {
|
||||
_cstack = CSTACK_VM;
|
||||
} else if (strcmp(value, "vmx") == 0) {
|
||||
_cstack = CSTACK_VMX;
|
||||
} else {
|
||||
if (value[0] == 'n') {
|
||||
_cstack = CSTACK_NO;
|
||||
} else if (value[0] == 'l') {
|
||||
_cstack = CSTACK_LBR;
|
||||
} else {
|
||||
_cstack = CSTACK_FP;
|
||||
}
|
||||
}
|
||||
|
||||
CASE("clock")
|
||||
if (value != NULL) {
|
||||
if (value[0] == 't') {
|
||||
_clock = CLK_TSC;
|
||||
} else if (value[0] == 'm') {
|
||||
_clock = CLK_MONOTONIC;
|
||||
}
|
||||
}
|
||||
|
||||
CASE("target-cpu")
|
||||
if (value == NULL || (_target_cpu = atoi(value)) < 0) {
|
||||
_target_cpu = -1;
|
||||
}
|
||||
|
||||
// Output style modifiers
|
||||
CASE("simple")
|
||||
_style |= STYLE_SIMPLE;
|
||||
@@ -420,83 +213,59 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("dot")
|
||||
_style |= STYLE_DOTTED;
|
||||
|
||||
CASE("norm")
|
||||
_style |= STYLE_NORMALIZE;
|
||||
|
||||
CASE("sig")
|
||||
_style |= STYLE_SIGNATURES;
|
||||
|
||||
CASE("ann")
|
||||
_style |= STYLE_ANNOTATE;
|
||||
|
||||
CASE("lib")
|
||||
_style |= STYLE_LIB_NAMES;
|
||||
|
||||
CASE("mcache")
|
||||
_mcache = value == NULL ? 1 : (unsigned char)strtol(value, NULL, 0);
|
||||
|
||||
CASE("begin")
|
||||
_begin = value;
|
||||
|
||||
CASE("end")
|
||||
_end = value;
|
||||
|
||||
CASE("nostop")
|
||||
_nostop = true;
|
||||
|
||||
// FlameGraph options
|
||||
CASE("title")
|
||||
_title = value;
|
||||
if (value != NULL) _title = value;
|
||||
|
||||
CASE("minwidth")
|
||||
if (value != NULL) _minwidth = atof(value);
|
||||
|
||||
CASE("reverse")
|
||||
_reverse = true;
|
||||
|
||||
CASE("inverted")
|
||||
_inverted = true;
|
||||
|
||||
DEFAULT()
|
||||
if (_unknown_arg == NULL) _unknown_arg = arg;
|
||||
}
|
||||
}
|
||||
|
||||
// Return error only after parsing all arguments, when 'log' is already set
|
||||
if (msg != NULL) {
|
||||
return Error(msg);
|
||||
}
|
||||
|
||||
if (_event == NULL && _alloc < 0 && _lock < 0 && _wall < 0 && _nativemem < 0) {
|
||||
_event = EVENT_CPU;
|
||||
if (_file != NULL && strchr(_file, '%') != NULL) {
|
||||
_file = expandFilePattern(_buf + len + 1, EXTRA_BUF_SIZE - 1, _file);
|
||||
}
|
||||
|
||||
if (_file != NULL && _output == OUTPUT_NONE) {
|
||||
_output = detectOutputFormat(_file);
|
||||
if (_output == OUTPUT_SVG) {
|
||||
return Error("SVG format is obsolete, use .html for FlameGraph");
|
||||
}
|
||||
_dump_traces = 100;
|
||||
_dump_flat = 200;
|
||||
}
|
||||
|
||||
if (_action == ACTION_NONE && _output != OUTPUT_NONE) {
|
||||
if (_output != OUTPUT_NONE && (_action == ACTION_NONE || _action == ACTION_STOP)) {
|
||||
_action = ACTION_DUMP;
|
||||
}
|
||||
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
const char* Arguments::file() {
|
||||
if (_file != NULL && strchr(_file, '%') != NULL) {
|
||||
return expandFilePattern(_file);
|
||||
bool Arguments::addEvent(const char* event) {
|
||||
if (strcmp(event, EVENT_ALLOC) == 0) {
|
||||
_events |= EK_ALLOC;
|
||||
} else if (strcmp(event, EVENT_LOCK) == 0) {
|
||||
_events |= EK_LOCK;
|
||||
} else {
|
||||
if (_events & EK_CPU) {
|
||||
return false;
|
||||
}
|
||||
_events |= EK_CPU;
|
||||
_event_desc = event;
|
||||
}
|
||||
return _file;
|
||||
}
|
||||
|
||||
// Returns true if the log file is a temporary file of asprof launcher
|
||||
bool Arguments::hasTemporaryLog() const {
|
||||
return _log != NULL && strncmp(_log, "/tmp/asprof-log.", 16) == 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
// The linked list of string offsets is embedded right into _buf array
|
||||
@@ -514,14 +283,11 @@ long long Arguments::hash(const char* arg) {
|
||||
return h;
|
||||
}
|
||||
|
||||
// Expands the following patterns:
|
||||
// %p process id
|
||||
// %t timestamp (yyyyMMdd-hhmmss)
|
||||
// %n{MAX} sequence number
|
||||
// %{ENV} environment variable
|
||||
const char* Arguments::expandFilePattern(const char* pattern) {
|
||||
char* ptr = _buf;
|
||||
char* end = _buf + EXTRA_BUF_SIZE - 1;
|
||||
// Expands %p to the process id
|
||||
// %t to the timestamp
|
||||
const char* Arguments::expandFilePattern(char* dest, size_t max_size, const char* pattern) {
|
||||
char* ptr = dest;
|
||||
char* end = dest + max_size - 1;
|
||||
|
||||
while (ptr < end && *pattern != 0) {
|
||||
char c = *pattern++;
|
||||
@@ -540,35 +306,13 @@ const char* Arguments::expandFilePattern(const char* pattern) {
|
||||
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
|
||||
t.tm_hour, t.tm_min, t.tm_sec);
|
||||
continue;
|
||||
} else if (c == 'n') {
|
||||
unsigned int max_files = 0;
|
||||
const char* p;
|
||||
if (*pattern == '{' && (p = strchr(pattern, '}')) != NULL) {
|
||||
max_files = atoi(pattern + 1);
|
||||
pattern = p + 1;
|
||||
}
|
||||
ptr += snprintf(ptr, end - ptr, "%u", max_files > 0 ? _file_num % max_files : _file_num);
|
||||
continue;
|
||||
} else if (c == '{') {
|
||||
char env_key[128];
|
||||
const char* p = strchr(pattern, '}');
|
||||
if (p != NULL && p - pattern < sizeof(env_key)) {
|
||||
memcpy(env_key, pattern, p - pattern);
|
||||
env_key[p - pattern] = 0;
|
||||
const char* env_value = getenv(env_key);
|
||||
if (env_value != NULL) {
|
||||
ptr += snprintf(ptr, end - ptr, "%s", env_value);
|
||||
pattern = p + 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*ptr++ = c;
|
||||
}
|
||||
|
||||
*(ptr < end ? ptr : end) = 0;
|
||||
return _buf;
|
||||
*ptr = 0;
|
||||
return dest;
|
||||
}
|
||||
|
||||
Output Arguments::detectOutputFormat(const char* file) {
|
||||
@@ -580,57 +324,37 @@ Output Arguments::detectOutputFormat(const char* file) {
|
||||
return OUTPUT_JFR;
|
||||
} else if (strcmp(ext, ".collapsed") == 0 || strcmp(ext, ".folded") == 0) {
|
||||
return OUTPUT_COLLAPSED;
|
||||
} else if (strcmp(ext, ".svg") == 0) {
|
||||
return OUTPUT_SVG;
|
||||
}
|
||||
}
|
||||
return OUTPUT_TEXT;
|
||||
return OUTPUT_FLAT;
|
||||
}
|
||||
|
||||
long Arguments::parseUnits(const char* str, const Multiplier* multipliers) {
|
||||
long Arguments::parseUnits(const char* str) {
|
||||
char* end;
|
||||
long result = strtol(str, &end, 0);
|
||||
if (end == str) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
char c = *end;
|
||||
if (c == 0) {
|
||||
return result;
|
||||
}
|
||||
if (c >= 'A' && c <= 'Z') {
|
||||
c += 'a' - 'A';
|
||||
}
|
||||
|
||||
for (const Multiplier* m = multipliers; m->symbol; m++) {
|
||||
if (c == m->symbol) {
|
||||
return result * m->multiplier;
|
||||
}
|
||||
switch (*end) {
|
||||
case 0:
|
||||
return result;
|
||||
case 'K': case 'k':
|
||||
case 'U': case 'u': // microseconds
|
||||
return result * 1000;
|
||||
case 'M': case 'm': // million, megabytes or milliseconds
|
||||
return result * 1000000;
|
||||
case 'G': case 'g':
|
||||
case 'S': case 's': // seconds
|
||||
return result * 1000000000;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int Arguments::parseTimeout(const char* str) {
|
||||
const char* p = strchr(str, ':');
|
||||
if (p == NULL) {
|
||||
return parseUnits(str, SECONDS);
|
||||
}
|
||||
|
||||
int hh = str[0] >= '0' && str[0] <= '2' ? atoi(str) : 0xff;
|
||||
int mm = p[1] >= '0' && p[1] <= '5' ? atoi(p + 1) : 0xff;
|
||||
int ss = (p = strchr(p + 1, ':')) != NULL && p[1] >= '0' && p[1] <= '5' ? atoi(p + 1) : 0xff;
|
||||
return 0xff000000 | hh << 16 | mm << 8 | ss;
|
||||
}
|
||||
|
||||
Arguments::~Arguments() {
|
||||
if (!_shared) free(_buf);
|
||||
}
|
||||
|
||||
void Arguments::save() {
|
||||
if (this != &_global_args) {
|
||||
free(_global_args._buf);
|
||||
_global_args = *this;
|
||||
_shared = true;
|
||||
}
|
||||
void Arguments::save(Arguments& other) {
|
||||
if (!_shared) free(_buf);
|
||||
*this = other;
|
||||
other._shared = true;
|
||||
}
|
||||
|
||||
241
src/arguments.h
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _ARGUMENTS_H
|
||||
@@ -9,118 +20,69 @@
|
||||
#include <stddef.h>
|
||||
|
||||
|
||||
const long DEFAULT_INTERVAL = 10000000; // 10 ms
|
||||
const long DEFAULT_ALLOC_INTERVAL = 524287; // 512 KiB
|
||||
const long DEFAULT_LOCK_INTERVAL = 10000; // 10 us
|
||||
const long DEFAULT_INTERVAL = 10000000; // 10 ms
|
||||
const int DEFAULT_JSTACKDEPTH = 2048;
|
||||
|
||||
const char* const EVENT_CPU = "cpu";
|
||||
const char* const EVENT_ALLOC = "alloc";
|
||||
const char* const EVENT_NATIVEMEM = "nativemem";
|
||||
const char* const EVENT_LOCK = "lock";
|
||||
const char* const EVENT_WALL = "wall";
|
||||
const char* const EVENT_CTIMER = "ctimer";
|
||||
const char* const EVENT_ITIMER = "itimer";
|
||||
const char* const EVENT_CPU = "cpu";
|
||||
const char* const EVENT_ALLOC = "alloc";
|
||||
const char* const EVENT_LOCK = "lock";
|
||||
const char* const EVENT_WALL = "wall";
|
||||
const char* const EVENT_ITIMER = "itimer";
|
||||
const char* const EVENT_JSTACK = "jstack";
|
||||
|
||||
#define SHORT_ENUM __attribute__((__packed__))
|
||||
|
||||
enum SHORT_ENUM Action {
|
||||
enum Action {
|
||||
ACTION_NONE,
|
||||
ACTION_START,
|
||||
ACTION_RESUME,
|
||||
ACTION_STOP,
|
||||
ACTION_DUMP,
|
||||
ACTION_CHECK,
|
||||
ACTION_STATUS,
|
||||
ACTION_MEMINFO,
|
||||
ACTION_LIST,
|
||||
ACTION_VERSION
|
||||
ACTION_VERSION,
|
||||
ACTION_FULL_VERSION,
|
||||
ACTION_DUMP
|
||||
};
|
||||
|
||||
enum SHORT_ENUM Counter {
|
||||
enum Counter {
|
||||
COUNTER_SAMPLES,
|
||||
COUNTER_TOTAL
|
||||
};
|
||||
|
||||
enum Ring {
|
||||
RING_ANY,
|
||||
RING_KERNEL,
|
||||
RING_USER
|
||||
};
|
||||
|
||||
enum EventKind {
|
||||
EK_CPU = 1,
|
||||
EK_ALLOC = 2,
|
||||
EK_LOCK = 4
|
||||
};
|
||||
|
||||
enum Style {
|
||||
STYLE_SIMPLE = 0x1,
|
||||
STYLE_DOTTED = 0x2,
|
||||
STYLE_NORMALIZE = 0x4,
|
||||
STYLE_SIGNATURES = 0x8,
|
||||
STYLE_ANNOTATE = 0x10,
|
||||
STYLE_LIB_NAMES = 0x20,
|
||||
STYLE_NO_SEMICOLON = 0x40
|
||||
STYLE_SIMPLE = 1,
|
||||
STYLE_DOTTED = 2,
|
||||
STYLE_SIGNATURES = 4,
|
||||
STYLE_ANNOTATE = 8
|
||||
};
|
||||
|
||||
// Whenever enum changes, update SETTING_CSTACK in FlightRecorder
|
||||
enum SHORT_ENUM CStack {
|
||||
CSTACK_DEFAULT, // use perf_event_open stack if available or Frame Pointer links otherwise
|
||||
CSTACK_NO, // do not collect native frames
|
||||
CSTACK_FP, // walk stack using Frame Pointer links
|
||||
CSTACK_DWARF, // use DWARF unwinding info from .eh_frame section
|
||||
CSTACK_LBR, // Last Branch Record hardware capability
|
||||
CSTACK_VM, // unwind using HotSpot VMStructs
|
||||
CSTACK_VMX // same as CSTACK_VM but with intermediate native frames
|
||||
enum CStack {
|
||||
CSTACK_DEFAULT,
|
||||
CSTACK_NO,
|
||||
CSTACK_FP,
|
||||
CSTACK_LBR
|
||||
};
|
||||
|
||||
enum SHORT_ENUM Clock {
|
||||
CLK_DEFAULT,
|
||||
CLK_TSC,
|
||||
CLK_MONOTONIC
|
||||
};
|
||||
|
||||
enum SHORT_ENUM Output {
|
||||
enum Output {
|
||||
OUTPUT_NONE,
|
||||
OUTPUT_TEXT,
|
||||
OUTPUT_SVG, // obsolete
|
||||
OUTPUT_FLAT,
|
||||
OUTPUT_COLLAPSED,
|
||||
OUTPUT_FLAMEGRAPH,
|
||||
OUTPUT_TREE,
|
||||
OUTPUT_JFR
|
||||
};
|
||||
|
||||
enum JfrOption {
|
||||
NO_SYSTEM_INFO = 0x1,
|
||||
NO_SYSTEM_PROPS = 0x2,
|
||||
NO_NATIVE_LIBS = 0x4,
|
||||
NO_CPU_LOAD = 0x8,
|
||||
NO_HEAP_SUMMARY = 0x10,
|
||||
|
||||
IN_MEMORY = 0x100,
|
||||
|
||||
JFR_SYNC_OPTS = NO_SYSTEM_INFO | NO_SYSTEM_PROPS | NO_NATIVE_LIBS | NO_CPU_LOAD | NO_HEAP_SUMMARY
|
||||
};
|
||||
|
||||
struct StackWalkFeatures {
|
||||
// Stack recovery techniques used to workaround AsyncGetCallTrace flaws
|
||||
unsigned short unknown_java : 1;
|
||||
unsigned short unwind_stub : 1;
|
||||
unsigned short unwind_comp : 1;
|
||||
unsigned short unwind_native : 1;
|
||||
unsigned short java_anchor : 1;
|
||||
unsigned short gc_traces : 1;
|
||||
|
||||
// Common features
|
||||
unsigned short stats : 1;
|
||||
|
||||
// Additional HotSpot-specific features
|
||||
unsigned short probe_sp : 1;
|
||||
unsigned short vtable_target : 1;
|
||||
unsigned short comp_task : 1;
|
||||
unsigned short pc_addr : 1;
|
||||
unsigned short _reserved : 5;
|
||||
|
||||
StackWalkFeatures() : unknown_java(1), unwind_stub(1), unwind_comp(1), unwind_native(1), java_anchor(1), gc_traces(1),
|
||||
stats(0), probe_sp(0), vtable_target(0), comp_task(0), pc_addr(0), _reserved(0) {
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct Multiplier {
|
||||
char symbol;
|
||||
long multiplier;
|
||||
};
|
||||
|
||||
|
||||
class Error {
|
||||
private:
|
||||
@@ -148,150 +110,73 @@ class Arguments {
|
||||
bool _shared;
|
||||
|
||||
void appendToEmbeddedList(int& list, char* value);
|
||||
const char* expandFilePattern(const char* pattern);
|
||||
|
||||
static long long hash(const char* arg);
|
||||
static const char* expandFilePattern(char* dest, size_t max_size, const char* pattern);
|
||||
static Output detectOutputFormat(const char* file);
|
||||
static long parseUnits(const char* str, const Multiplier* multipliers);
|
||||
static int parseTimeout(const char* str);
|
||||
static long parseUnits(const char* str);
|
||||
|
||||
public:
|
||||
Action _action;
|
||||
Counter _counter;
|
||||
const char* _event;
|
||||
int _timeout;
|
||||
Ring _ring;
|
||||
int _events;
|
||||
const char* _event_desc;
|
||||
long _interval;
|
||||
long _alloc;
|
||||
long _nativemem;
|
||||
long _lock;
|
||||
long _wall;
|
||||
bool _all;
|
||||
int _jstackdepth;
|
||||
int _signal;
|
||||
int _jstackdepth;
|
||||
int _safe_mode;
|
||||
const char* _file;
|
||||
const char* _log;
|
||||
const char* _loglevel;
|
||||
const char* _unknown_arg;
|
||||
const char* _server;
|
||||
const char* _filter;
|
||||
int _include;
|
||||
int _exclude;
|
||||
unsigned char _mcache;
|
||||
bool _loop;
|
||||
bool _preloaded;
|
||||
bool _quiet;
|
||||
bool _threads;
|
||||
bool _sched;
|
||||
bool _record_cpu;
|
||||
bool _live;
|
||||
bool _nofree;
|
||||
bool _nobatch;
|
||||
bool _nostop;
|
||||
bool _alluser;
|
||||
bool _fdtransfer;
|
||||
const char* _fdtransfer_path;
|
||||
int _target_cpu;
|
||||
int _style;
|
||||
StackWalkFeatures _features;
|
||||
CStack _cstack;
|
||||
Clock _clock;
|
||||
Output _output;
|
||||
long _chunk_size;
|
||||
long _chunk_time;
|
||||
const char* _jfr_sync;
|
||||
int _jfr_options;
|
||||
int _dump_traces;
|
||||
int _dump_flat;
|
||||
unsigned int _file_num;
|
||||
const char* _begin;
|
||||
const char* _end;
|
||||
// FlameGraph parameters
|
||||
const char* _title;
|
||||
double _minwidth;
|
||||
bool _reverse;
|
||||
bool _inverted;
|
||||
|
||||
Arguments() :
|
||||
_buf(NULL),
|
||||
_shared(false),
|
||||
_action(ACTION_NONE),
|
||||
_counter(COUNTER_SAMPLES),
|
||||
_event(NULL),
|
||||
_timeout(0),
|
||||
_ring(RING_ANY),
|
||||
_events(0),
|
||||
_event_desc(NULL),
|
||||
_interval(0),
|
||||
_alloc(-1),
|
||||
_nativemem(-1),
|
||||
_lock(-1),
|
||||
_wall(-1),
|
||||
_all(false),
|
||||
_jstackdepth(DEFAULT_JSTACKDEPTH),
|
||||
_signal(0),
|
||||
_safe_mode(0),
|
||||
_file(NULL),
|
||||
_log(NULL),
|
||||
_loglevel(NULL),
|
||||
_unknown_arg(NULL),
|
||||
_server(NULL),
|
||||
_filter(NULL),
|
||||
_include(0),
|
||||
_exclude(0),
|
||||
_mcache(0),
|
||||
_loop(false),
|
||||
_preloaded(false),
|
||||
_quiet(false),
|
||||
_threads(false),
|
||||
_sched(false),
|
||||
_record_cpu(false),
|
||||
_live(false),
|
||||
_nofree(false),
|
||||
_nobatch(false),
|
||||
_nostop(false),
|
||||
_alluser(false),
|
||||
_fdtransfer(false),
|
||||
_fdtransfer_path(NULL),
|
||||
_target_cpu(-1),
|
||||
_style(0),
|
||||
_features(),
|
||||
_cstack(CSTACK_DEFAULT),
|
||||
_clock(CLK_DEFAULT),
|
||||
_output(OUTPUT_NONE),
|
||||
_chunk_size(100 * 1024 * 1024),
|
||||
_chunk_time(3600),
|
||||
_jfr_sync(NULL),
|
||||
_jfr_options(0),
|
||||
_dump_traces(0),
|
||||
_dump_flat(0),
|
||||
_file_num(0),
|
||||
_begin(NULL),
|
||||
_end(NULL),
|
||||
_title(NULL),
|
||||
_title("Flame Graph"),
|
||||
_minwidth(0),
|
||||
_reverse(false),
|
||||
_inverted(false) {
|
||||
_reverse(false) {
|
||||
}
|
||||
|
||||
~Arguments();
|
||||
|
||||
void save();
|
||||
void save(Arguments& other);
|
||||
|
||||
Error parse(const char* args);
|
||||
|
||||
const char* file();
|
||||
|
||||
bool hasTemporaryLog() const;
|
||||
|
||||
bool hasOutputFile() const {
|
||||
return _file != NULL &&
|
||||
(_action == ACTION_STOP || _action == ACTION_DUMP ? _output != OUTPUT_JFR : _action >= ACTION_CHECK);
|
||||
}
|
||||
|
||||
bool hasOption(JfrOption option) const {
|
||||
return (_jfr_options & option) != 0;
|
||||
}
|
||||
bool addEvent(const char* event);
|
||||
|
||||
friend class FrameName;
|
||||
friend class Recording;
|
||||
};
|
||||
|
||||
extern Arguments _global_args;
|
||||
|
||||
#endif // _ARGUMENTS_H
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include "asprof.h"
|
||||
#include "hooks.h"
|
||||
#include "profiler.h"
|
||||
#include "tsc.h"
|
||||
#include "threadLocalData.h"
|
||||
#include "userEvents.h"
|
||||
|
||||
static asprof_error_t asprof_error(const char* msg) {
|
||||
return (asprof_error_t)msg;
|
||||
}
|
||||
|
||||
|
||||
DLLEXPORT void asprof_init() {
|
||||
Hooks::init(true);
|
||||
}
|
||||
|
||||
DLLEXPORT const char* asprof_error_str(asprof_error_t err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
DLLEXPORT asprof_error_t asprof_execute(const char* command, asprof_writer_t output_callback) {
|
||||
Arguments args;
|
||||
Error error = args.parse(command);
|
||||
if (error) {
|
||||
return asprof_error(error.message());
|
||||
}
|
||||
|
||||
Log::open(args);
|
||||
|
||||
if (!args.hasOutputFile()) {
|
||||
CallbackWriter out(output_callback);
|
||||
error = Profiler::instance()->runInternal(args, out);
|
||||
if (!error) {
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
FileWriter out(args.file());
|
||||
if (!out.is_open()) {
|
||||
return asprof_error("Could not open output file");
|
||||
}
|
||||
error = Profiler::instance()->runInternal(args, out);
|
||||
if (!error) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return asprof_error(error.message());
|
||||
}
|
||||
|
||||
DLLEXPORT asprof_thread_local_data* asprof_get_thread_local_data(void) {
|
||||
return ThreadLocalData::getThreadLocalData();
|
||||
}
|
||||
|
||||
DLLEXPORT asprof_jfr_event_key asprof_register_jfr_event(const char* name) {
|
||||
return UserEvents::registerEvent(name);
|
||||
}
|
||||
|
||||
#define asprof_str(s) #s
|
||||
|
||||
DLLEXPORT asprof_error_t asprof_emit_jfr_event(asprof_jfr_event_key type, const uint8_t* data, size_t len) {
|
||||
if (len > ASPROF_MAX_JFR_EVENT_LENGTH) {
|
||||
return asprof_error("Unable to emit JFR event larger than " asprof_str(ASPROF_MAX_JFR_EVENT_LENGTH) " bytes");
|
||||
}
|
||||
|
||||
UserEvent event;
|
||||
event._start_time = TSC::ticks();
|
||||
event._type = type;
|
||||
event._data = data;
|
||||
event._len = len;
|
||||
Profiler::instance()->recordEventOnly(USER_EVENT, &event);
|
||||
return NULL;
|
||||
}
|
||||
106
src/asprof.h
@@ -1,106 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#ifndef _ASPROF_H
|
||||
#define _ASPROF_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __clang__
|
||||
# define DLLEXPORT __attribute__((visibility("default")))
|
||||
#else
|
||||
# define DLLEXPORT __attribute__((visibility("default"),externally_visible))
|
||||
#endif
|
||||
|
||||
#define WEAK __attribute__((weak))
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
typedef const char* asprof_error_t;
|
||||
typedef void (*asprof_writer_t)(const char* buf, size_t size);
|
||||
|
||||
// Should be called once prior to any other API functions
|
||||
DLLEXPORT void asprof_init();
|
||||
typedef void (*asprof_init_t)();
|
||||
|
||||
// Returns an error message for the given error code or NULL if there is no error
|
||||
DLLEXPORT const char* asprof_error_str(asprof_error_t err);
|
||||
typedef const char* (*asprof_error_str_t)(asprof_error_t err);
|
||||
|
||||
// Executes async-profiler command using output_callback as an optional sink
|
||||
// for the profiler output. Returning an error code or NULL on success.
|
||||
DLLEXPORT asprof_error_t asprof_execute(const char* command, asprof_writer_t output_callback);
|
||||
typedef asprof_error_t (*asprof_execute_t)(const char* command, asprof_writer_t output_callback);
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
typedef struct {
|
||||
// A thread-local sample counter, which increments (not necessarily by 1) every time a
|
||||
// stack profiling sample is taken using a profiling signal.
|
||||
//
|
||||
// The counter might be initialized lazily, only starting counting from 0 the first time
|
||||
// `asprof_get_thread_local_data` is called on a given thread. Further calls to
|
||||
// `asprof_get_thread_local_data` on a given thread will of course not reset the counter.
|
||||
volatile uint64_t sample_counter;
|
||||
} asprof_thread_local_data;
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Gets a pointer to asprof's thread-local data structure, see `asprof_thread_local_data`'s
|
||||
// documentation for the details of each field. This function might lazily initialize that
|
||||
// structure.
|
||||
//
|
||||
// This function can return NULL either if the profiler is not yet initializer, or in
|
||||
// case of an allocation failure.
|
||||
//
|
||||
// This function is *not* async-signal-safe. However, it is safe to call concurrently
|
||||
// with async-profiler operations, including initialization.
|
||||
DLLEXPORT asprof_thread_local_data* asprof_get_thread_local_data(void);
|
||||
typedef asprof_thread_local_data* (*asprof_get_thread_local_data_t)(void);
|
||||
|
||||
|
||||
typedef int asprof_jfr_event_key;
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Return a asprof_jfr_event_key identifier for a user-defined JFR key.
|
||||
// That identifier can then be used in `asprof_emit_jfr_event`
|
||||
//
|
||||
// The name is required to be valid (since it's a C string, NUL-free) UTF-8.
|
||||
//
|
||||
// Returns -1 on failure.
|
||||
DLLEXPORT asprof_jfr_event_key asprof_register_jfr_event(const char* name);
|
||||
typedef asprof_jfr_event_key (*asprof_register_jfr_event_t)(const char* name);
|
||||
|
||||
|
||||
#define ASPROF_MAX_JFR_EVENT_LENGTH 2048
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Emits a custom, user-defined JFR event. The key should be created via `asprof_register_jfr_event`.
|
||||
// The data can be arbitrary binary data, with size <= ASPROF_MAX_JFR_EVENT_LENGTH.
|
||||
//
|
||||
// User-defined events are included in the JFR under a `profiler.UserEvent` event type. That type will contain
|
||||
// (at least) the following fields:
|
||||
// 1. `startTime` [Long] - the emitted event's time in ticks.
|
||||
// 2. `eventThread` [java.lang.Thread] - the thread that emitted the events.
|
||||
// 3. `type` [profiler.types.UserEventType] - the event's type,
|
||||
// where `profiler.types.UserEventType` is an indexed string from the JFR constant pool.
|
||||
// 4. `data` [String] - the event data. This is the Latin-1 encoded version of the inputted data.
|
||||
// The Latin-1 encoding is used as a way to stuff the arbitrary byte input into something
|
||||
// that JFR supports (JFR technically supports byte arrays, but `jfr print` doesn't).
|
||||
//
|
||||
// Returns an error code or NULL on success.
|
||||
DLLEXPORT asprof_error_t asprof_emit_jfr_event(asprof_jfr_event_key type, const uint8_t* data, size_t len);
|
||||
typedef asprof_error_t (*asprof_emit_jfr_event_t)(asprof_jfr_event_key type, const uint8_t* data, size_t len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // _ASPROF_H
|
||||
@@ -1,17 +1,28 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include "callTraceStorage.h"
|
||||
#include "os.h"
|
||||
|
||||
#define COMMA ,
|
||||
|
||||
static const u32 INITIAL_CAPACITY = 65536;
|
||||
static const u32 CALL_TRACE_CHUNK = 8 * 1024 * 1024;
|
||||
static const u32 OVERFLOW_TRACE_ID = 0x7fffffff;
|
||||
static const size_t PAGE_ALIGNMENT = sysconf(_SC_PAGESIZE) - 1;
|
||||
|
||||
|
||||
class LongHashTable {
|
||||
@@ -25,7 +36,7 @@ class LongHashTable {
|
||||
|
||||
static size_t getSize(u32 capacity) {
|
||||
size_t size = sizeof(LongHashTable) + (sizeof(u64) + sizeof(CallTraceSample)) * capacity;
|
||||
return (size + OS::page_mask) & ~OS::page_mask;
|
||||
return (size + PAGE_ALIGNMENT) & ~PAGE_ALIGNMENT;
|
||||
}
|
||||
|
||||
public:
|
||||
@@ -45,10 +56,6 @@ class LongHashTable {
|
||||
return prev;
|
||||
}
|
||||
|
||||
size_t usedMemory() {
|
||||
return getSize(_capacity);
|
||||
}
|
||||
|
||||
LongHashTable* prev() {
|
||||
return _prev;
|
||||
}
|
||||
@@ -79,11 +86,9 @@ class LongHashTable {
|
||||
}
|
||||
};
|
||||
|
||||
CallTrace CallTraceStorage::_overflow_trace = {1, {BCI_ERROR, LP64_ONLY(0 COMMA) (jmethodID)"storage_overflow"}};
|
||||
|
||||
CallTraceStorage::CallTraceStorage() : _allocator(CALL_TRACE_CHUNK) {
|
||||
_current_table = LongHashTable::allocate(NULL, INITIAL_CAPACITY);
|
||||
_overflow = 0;
|
||||
}
|
||||
|
||||
CallTraceStorage::~CallTraceStorage() {
|
||||
@@ -98,21 +103,6 @@ void CallTraceStorage::clear() {
|
||||
}
|
||||
_current_table->clear();
|
||||
_allocator.clear();
|
||||
_overflow = 0;
|
||||
}
|
||||
|
||||
u32 CallTraceStorage::capacity() {
|
||||
// As capacity of each subsequent table doubles,
|
||||
// total capacity is a sum of geometric series: 64K + 128K + 256K...
|
||||
return _current_table->capacity() * 2 - INITIAL_CAPACITY;
|
||||
}
|
||||
|
||||
size_t CallTraceStorage::usedMemory() {
|
||||
size_t bytes = _allocator.usedMemory();
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
bytes += table->usedMemory();
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
void CallTraceStorage::collectTraces(std::map<u32, CallTrace*>& map) {
|
||||
@@ -122,20 +112,11 @@ void CallTraceStorage::collectTraces(std::map<u32, CallTrace*>& map) {
|
||||
u32 capacity = table->capacity();
|
||||
|
||||
for (u32 slot = 0; slot < capacity; slot++) {
|
||||
if (keys[slot] != 0 && loadAcquire(values[slot].samples) != 0) {
|
||||
// Reset samples to avoid duplication of call traces between JFR chunks
|
||||
values[slot].samples = 0;
|
||||
CallTrace* trace = values[slot].acquireTrace();
|
||||
if (trace != NULL) {
|
||||
map[capacity - (INITIAL_CAPACITY - 1) + slot] = trace;
|
||||
}
|
||||
if (keys[slot] != 0) {
|
||||
map[capacity - (INITIAL_CAPACITY - 1) + slot] = values[slot].trace;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (_overflow > 0) {
|
||||
map[OVERFLOW_TRACE_ID] = &_overflow_trace;
|
||||
}
|
||||
}
|
||||
|
||||
void CallTraceStorage::collectSamples(std::vector<CallTraceSample*>& samples) {
|
||||
@@ -152,20 +133,6 @@ void CallTraceStorage::collectSamples(std::vector<CallTraceSample*>& samples) {
|
||||
}
|
||||
}
|
||||
|
||||
void CallTraceStorage::collectSamples(std::map<u64, CallTraceSample>& map) {
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
u64* keys = table->keys();
|
||||
CallTraceSample* values = table->values();
|
||||
u32 capacity = table->capacity();
|
||||
|
||||
for (u32 slot = 0; slot < capacity; slot++) {
|
||||
if (keys[slot] != 0 && values[slot].acquireTrace() != NULL) {
|
||||
map[keys[slot]] += values[slot];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Adaptation of MurmurHash64A by Austin Appleby
|
||||
u64 CallTraceStorage::calcHash(int num_frames, ASGCT_CallFrame* frames) {
|
||||
const u64 M = 0xc6a4a7935bd1e995ULL;
|
||||
@@ -258,56 +225,22 @@ u32 CallTraceStorage::put(int num_frames, ASGCT_CallFrame* frames, u64 counter)
|
||||
if (trace == NULL) {
|
||||
trace = storeCallTrace(num_frames, frames);
|
||||
}
|
||||
table->values()[slot].setTrace(trace);
|
||||
table->values()[slot].trace = trace;
|
||||
break;
|
||||
}
|
||||
|
||||
if (++step >= capacity) {
|
||||
// Very unlikely case of a table overflow
|
||||
atomicInc(_overflow);
|
||||
return OVERFLOW_TRACE_ID;
|
||||
return 0;
|
||||
}
|
||||
// Improved version of linear probing
|
||||
slot = (slot + step) & (capacity - 1);
|
||||
}
|
||||
|
||||
if (counter != 0) {
|
||||
CallTraceSample& s = table->values()[slot];
|
||||
atomicInc(s.samples);
|
||||
atomicInc(s.counter, counter);
|
||||
}
|
||||
// TODO: check overhead
|
||||
CallTraceSample& s = table->values()[slot];
|
||||
atomicInc(s.samples);
|
||||
atomicInc(s.counter, counter);
|
||||
|
||||
return capacity - (INITIAL_CAPACITY - 1) + slot;
|
||||
}
|
||||
|
||||
void CallTraceStorage::add(u32 call_trace_id, u64 samples, u64 counter) {
|
||||
if (call_trace_id > capacity()) { // this also covers call_trace_id == OVERFLOW_TRACE_ID
|
||||
return;
|
||||
}
|
||||
|
||||
call_trace_id += (INITIAL_CAPACITY - 1);
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
if (call_trace_id >= table->capacity()) {
|
||||
CallTraceSample& s = table->values()[call_trace_id - table->capacity()];
|
||||
atomicInc(s.samples, samples);
|
||||
atomicInc(s.counter, counter);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CallTraceStorage::resetCounters() {
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
u64* keys = table->keys();
|
||||
CallTraceSample* values = table->values();
|
||||
u32 capacity = table->capacity();
|
||||
|
||||
for (u32 slot = 0; slot < capacity; slot++) {
|
||||
if (keys[slot] != 0) {
|
||||
CallTraceSample& s = values[slot];
|
||||
storeRelease(s.samples, 0);
|
||||
storeRelease(s.counter, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _CALLTRACESTORAGE_H
|
||||
@@ -24,30 +35,12 @@ struct CallTraceSample {
|
||||
CallTrace* trace;
|
||||
u64 samples;
|
||||
u64 counter;
|
||||
|
||||
CallTrace* acquireTrace() {
|
||||
return __atomic_load_n(&trace, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
void setTrace(CallTrace* value) {
|
||||
return __atomic_store_n(&trace, value, __ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
CallTraceSample& operator+=(const CallTraceSample& s) {
|
||||
trace = s.trace;
|
||||
samples += s.samples;
|
||||
counter += s.counter;
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
class CallTraceStorage {
|
||||
private:
|
||||
static CallTrace _overflow_trace;
|
||||
|
||||
LinearAllocator _allocator;
|
||||
LongHashTable* _current_table;
|
||||
u64 _overflow;
|
||||
|
||||
u64 calcHash(int num_frames, ASGCT_CallFrame* frames);
|
||||
CallTrace* storeCallTrace(int num_frames, ASGCT_CallFrame* frames);
|
||||
@@ -58,16 +51,10 @@ class CallTraceStorage {
|
||||
~CallTraceStorage();
|
||||
|
||||
void clear();
|
||||
u32 capacity();
|
||||
size_t usedMemory();
|
||||
|
||||
void collectTraces(std::map<u32, CallTrace*>& map);
|
||||
void collectSamples(std::vector<CallTraceSample*>& samples);
|
||||
void collectSamples(std::map<u64, CallTraceSample>& map);
|
||||
|
||||
u32 put(int num_frames, ASGCT_CallFrame* frames, u64 counter);
|
||||
void add(u32 call_trace_id, u64 samples, u64 counter);
|
||||
void resetCounters();
|
||||
};
|
||||
|
||||
#endif // _CALLTRACESTORAGE
|
||||
|
||||
32
src/chk.cpp
@@ -1,32 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#ifndef __clang__
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "asprof.h"
|
||||
|
||||
|
||||
// libgcc refers to __sprintf_chk, but there is no such symbol in musl libc.
|
||||
// Export a weak symbol in order to make profiler library work both with glibc and musl.
|
||||
|
||||
extern "C" WEAK DLLEXPORT
|
||||
int __sprintf_chk(char* s, int flag, size_t slen, const char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
int ret = vsnprintf(s, slen, format, args);
|
||||
va_end(args);
|
||||
|
||||
if (ret >= slen) {
|
||||
fprintf(stderr, "__sprintf_chk failed\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif // __clang__
|
||||
@@ -1,83 +1,42 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2016 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include "codeCache.h"
|
||||
#include "dwarf.h"
|
||||
#include "os.h"
|
||||
|
||||
|
||||
char* NativeFunc::create(const char* name, short lib_index) {
|
||||
NativeFunc* f = (NativeFunc*)malloc(sizeof(NativeFunc) + 1 + strlen(name));
|
||||
f->_lib_index = lib_index;
|
||||
f->_mark = 0;
|
||||
return strcpy(f->_name, name);
|
||||
}
|
||||
|
||||
void NativeFunc::destroy(char* name) {
|
||||
free(from(name));
|
||||
}
|
||||
|
||||
size_t NativeFunc::usedMemory(const char* name) {
|
||||
return sizeof(NativeFunc) + 1 + strlen(from(name)->_name);
|
||||
}
|
||||
|
||||
|
||||
CodeCache::CodeCache(const char* name, short lib_index, bool imports_patchable,
|
||||
const void* min_address, const void* max_address) {
|
||||
_name = NativeFunc::create(name, -1);
|
||||
_lib_index = lib_index;
|
||||
_min_address = min_address;
|
||||
_max_address = max_address;
|
||||
_text_base = NULL;
|
||||
|
||||
_plt_offset = 0;
|
||||
_plt_size = 0;
|
||||
|
||||
memset(_imports, 0, sizeof(_imports));
|
||||
_imports_patchable = imports_patchable;
|
||||
_debug_symbols = false;
|
||||
|
||||
_dwarf_table = NULL;
|
||||
_dwarf_table_length = 0;
|
||||
|
||||
_capacity = INITIAL_CODE_CACHE_CAPACITY;
|
||||
_count = 0;
|
||||
_blobs = new CodeBlob[_capacity];
|
||||
}
|
||||
|
||||
CodeCache::~CodeCache() {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
NativeFunc::destroy(_blobs[i]._name);
|
||||
}
|
||||
NativeFunc::destroy(_name);
|
||||
delete[] _blobs;
|
||||
free(_dwarf_table);
|
||||
}
|
||||
|
||||
void CodeCache::expand() {
|
||||
CodeBlob* old_blobs = _blobs;
|
||||
CodeBlob* new_blobs = new CodeBlob[_capacity * 2];
|
||||
|
||||
memcpy(new_blobs, old_blobs, _count * sizeof(CodeBlob));
|
||||
int live = 0;
|
||||
for (int i = 0; i < _count; i++) {
|
||||
if (_blobs[i]._method != NULL) {
|
||||
new_blobs[live++] = _blobs[i];
|
||||
}
|
||||
}
|
||||
|
||||
_count = live;
|
||||
_capacity *= 2;
|
||||
_blobs = new_blobs;
|
||||
delete[] old_blobs;
|
||||
}
|
||||
|
||||
void CodeCache::add(const void* start, int length, const char* name, bool update_bounds) {
|
||||
char* name_copy = NativeFunc::create(name, _lib_index);
|
||||
// Replace non-printable characters
|
||||
for (char* s = name_copy; *s != 0; s++) {
|
||||
if (*s < ' ') *s = '?';
|
||||
}
|
||||
|
||||
void CodeCache::add(const void* start, int length, jmethodID method, bool update_bounds) {
|
||||
if (_count >= _capacity) {
|
||||
expand();
|
||||
}
|
||||
@@ -85,20 +44,58 @@ void CodeCache::add(const void* start, int length, const char* name, bool update
|
||||
const void* end = (const char*)start + length;
|
||||
_blobs[_count]._start = start;
|
||||
_blobs[_count]._end = end;
|
||||
_blobs[_count]._name = name_copy;
|
||||
_blobs[_count]._method = method;
|
||||
_count++;
|
||||
|
||||
if (update_bounds) {
|
||||
updateBounds(start, end);
|
||||
if (start < _min_address) _min_address = start;
|
||||
if (end > _max_address) _max_address = end;
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::updateBounds(const void* start, const void* end) {
|
||||
if (start < _min_address) _min_address = start;
|
||||
if (end > _max_address) _max_address = end;
|
||||
void CodeCache::remove(const void* start, jmethodID method) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
if (_blobs[i]._start == start && _blobs[i]._method == method) {
|
||||
_blobs[i]._method = NULL;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::sort() {
|
||||
jmethodID CodeCache::find(const void* address) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
CodeBlob* cb = _blobs + i;
|
||||
if (address >= cb->_start && address < cb->_end && cb->_method != NULL) {
|
||||
return _blobs[i]._method;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
NativeCodeCache::NativeCodeCache(const char* name, const void* min_address, const void* max_address) {
|
||||
_name = strdup(name);
|
||||
_min_address = min_address;
|
||||
_max_address = max_address;
|
||||
}
|
||||
|
||||
NativeCodeCache::~NativeCodeCache() {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
free(_blobs[i]._method);
|
||||
}
|
||||
free(_name);
|
||||
}
|
||||
|
||||
void NativeCodeCache::add(const void* start, int length, const char* name, bool update_bounds) {
|
||||
char* name_copy = strdup(name);
|
||||
// Replace non-printable characters
|
||||
for (char* s = name_copy; *s != 0; s++) {
|
||||
if (*s < ' ') *s = '?';
|
||||
}
|
||||
CodeCache::add(start, length, (jmethodID)name_copy, update_bounds);
|
||||
}
|
||||
|
||||
void NativeCodeCache::sort() {
|
||||
if (_count == 0) return;
|
||||
|
||||
qsort(_blobs, _count, sizeof(CodeBlob), CodeBlob::comparator);
|
||||
@@ -107,26 +104,7 @@ void CodeCache::sort() {
|
||||
if (_max_address == NO_MAX_ADDRESS) _max_address = _blobs[_count - 1]._end;
|
||||
}
|
||||
|
||||
CodeBlob* CodeCache::findBlob(const char* name) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
if (blob_name != NULL && strcmp(blob_name, name) == 0) {
|
||||
return &_blobs[i];
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
CodeBlob* CodeCache::findBlobByAddress(const void* address) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
if (address >= _blobs[i]._start && address < _blobs[i]._end) {
|
||||
return &_blobs[i];
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const char* CodeCache::binarySearch(const void* address) {
|
||||
const char* NativeCodeCache::binarySearch(const void* address) {
|
||||
int low = 0;
|
||||
int high = _count - 1;
|
||||
|
||||
@@ -137,177 +115,38 @@ const char* CodeCache::binarySearch(const void* address) {
|
||||
} else if (_blobs[mid]._start > address) {
|
||||
high = mid - 1;
|
||||
} else {
|
||||
return _blobs[mid]._name;
|
||||
return (const char*)_blobs[mid]._method;
|
||||
}
|
||||
}
|
||||
|
||||
// Symbols with zero size can be valid functions: e.g. ASM entry points or kernel code.
|
||||
// Also, in some cases (endless loop) the return address may point beyond the function.
|
||||
if (low > 0 && (_blobs[low - 1]._start == _blobs[low - 1]._end || _blobs[low - 1]._end == address)) {
|
||||
return _blobs[low - 1]._name;
|
||||
return (const char*)_blobs[low - 1]._method;
|
||||
}
|
||||
return _name;
|
||||
}
|
||||
|
||||
const void* CodeCache::findSymbol(const char* name) {
|
||||
CodeBlob* blob = findBlob(name);
|
||||
return blob == NULL ? NULL : blob->_start;
|
||||
const void* NativeCodeCache::findSymbol(const char* name) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = (const char*)_blobs[i]._method;
|
||||
if (blob_name != NULL && strcmp(blob_name, name) == 0) {
|
||||
return _blobs[i]._start;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const void* CodeCache::findSymbolByPrefix(const char* prefix) {
|
||||
const void* NativeCodeCache::findSymbolByPrefix(const char* prefix) {
|
||||
return findSymbolByPrefix(prefix, strlen(prefix));
|
||||
}
|
||||
|
||||
const void* CodeCache::findSymbolByPrefix(const char* prefix, int prefix_len) {
|
||||
const void* result = NULL;
|
||||
const void* NativeCodeCache::findSymbolByPrefix(const char* prefix, int prefix_len) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
const char* blob_name = (const char*)_blobs[i]._method;
|
||||
if (blob_name != NULL && strncmp(blob_name, prefix, prefix_len) == 0) {
|
||||
result = _blobs[i]._start;
|
||||
// Symbols which contain a dot are only patched if no alternative is found,
|
||||
// see #1247
|
||||
if (strchr(blob_name + prefix_len, '.') == NULL) {
|
||||
return result;
|
||||
}
|
||||
return _blobs[i]._start;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void CodeCache::saveImport(ImportId id, void** entry) {
|
||||
for (int ty = 0; ty < NUM_IMPORT_TYPES; ty++) {
|
||||
if (_imports[id][ty] == nullptr) {
|
||||
_imports[id][ty] = entry;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::addImport(void** entry, const char* name) {
|
||||
switch (name[0]) {
|
||||
case 'a':
|
||||
if (strcmp(name, "aligned_alloc") == 0) {
|
||||
saveImport(im_aligned_alloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'c':
|
||||
if (strcmp(name, "calloc") == 0) {
|
||||
saveImport(im_calloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'd':
|
||||
if (strcmp(name, "dlopen") == 0) {
|
||||
saveImport(im_dlopen, entry);
|
||||
}
|
||||
break;
|
||||
case 'f':
|
||||
if (strcmp(name, "free") == 0) {
|
||||
saveImport(im_free, entry);
|
||||
}
|
||||
break;
|
||||
case 'm':
|
||||
if (strcmp(name, "malloc") == 0) {
|
||||
saveImport(im_malloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'p':
|
||||
if (strcmp(name, "pthread_create") == 0) {
|
||||
saveImport(im_pthread_create, entry);
|
||||
} else if (strcmp(name, "pthread_exit") == 0) {
|
||||
saveImport(im_pthread_exit, entry);
|
||||
} else if (strcmp(name, "pthread_setspecific") == 0) {
|
||||
saveImport(im_pthread_setspecific, entry);
|
||||
} else if (strcmp(name, "poll") == 0) {
|
||||
saveImport(im_poll, entry);
|
||||
} else if (strcmp(name, "posix_memalign") == 0) {
|
||||
saveImport(im_posix_memalign, entry);
|
||||
}
|
||||
break;
|
||||
case 'r':
|
||||
if (strcmp(name, "realloc") == 0) {
|
||||
saveImport(im_realloc, entry);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void** CodeCache::findImport(ImportId id) {
|
||||
if (!_imports_patchable) {
|
||||
makeImportsPatchable();
|
||||
_imports_patchable = true;
|
||||
}
|
||||
return _imports[id][PRIMARY];
|
||||
}
|
||||
|
||||
void CodeCache::patchImport(ImportId id, void* hook_func) {
|
||||
if (!_imports_patchable) {
|
||||
makeImportsPatchable();
|
||||
_imports_patchable = true;
|
||||
}
|
||||
|
||||
for (int ty = 0; ty < NUM_IMPORT_TYPES; ty++) {
|
||||
void** entry = _imports[id][ty];
|
||||
if (entry != NULL) {
|
||||
*entry = hook_func;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::makeImportsPatchable() {
|
||||
void** min_import = (void**)-1;
|
||||
void** max_import = NULL;
|
||||
for (int i = 0; i < NUM_IMPORTS; i++) {
|
||||
for (int j = 0; j < NUM_IMPORT_TYPES; j++) {
|
||||
void** entry = _imports[i][j];
|
||||
if (entry == NULL) continue;
|
||||
if (entry < min_import) min_import = entry;
|
||||
if (entry > max_import) max_import = entry;
|
||||
}
|
||||
}
|
||||
|
||||
if (max_import != NULL) {
|
||||
uintptr_t patch_start = (uintptr_t)min_import & ~OS::page_mask;
|
||||
uintptr_t patch_end = (uintptr_t)max_import & ~OS::page_mask;
|
||||
mprotect((void*)patch_start, patch_end - patch_start + OS::page_size, PROT_READ | PROT_WRITE);
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::setDwarfTable(FrameDesc* table, int length) {
|
||||
_dwarf_table = table;
|
||||
_dwarf_table_length = length;
|
||||
}
|
||||
|
||||
FrameDesc* CodeCache::findFrameDesc(const void* pc) {
|
||||
u32 target_loc = (const char*)pc - _text_base;
|
||||
int low = 0;
|
||||
int high = _dwarf_table_length - 1;
|
||||
|
||||
while (low <= high) {
|
||||
int mid = (unsigned int)(low + high) >> 1;
|
||||
if (_dwarf_table[mid].loc < target_loc) {
|
||||
low = mid + 1;
|
||||
} else if (_dwarf_table[mid].loc > target_loc) {
|
||||
high = mid - 1;
|
||||
} else {
|
||||
return &_dwarf_table[mid];
|
||||
}
|
||||
}
|
||||
|
||||
if (low > 0) {
|
||||
return &_dwarf_table[low - 1];
|
||||
} else if (target_loc - _plt_offset < _plt_size) {
|
||||
return &FrameDesc::empty_frame;
|
||||
} else {
|
||||
return &FrameDesc::default_frame;
|
||||
}
|
||||
}
|
||||
|
||||
size_t CodeCache::usedMemory() {
|
||||
size_t bytes = _capacity * sizeof(CodeBlob);
|
||||
bytes += _dwarf_table_length * sizeof(FrameDesc);
|
||||
bytes += NativeFunc::usedMemory(_name);
|
||||
for (int i = 0; i < _count; i++) {
|
||||
bytes += NativeFunc::usedMemory(_blobs[i]._name);
|
||||
}
|
||||
return bytes;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
209
src/codeCache.h
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2017 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _CODECACHE_H
|
||||
@@ -13,74 +24,13 @@
|
||||
#define NO_MAX_ADDRESS ((const void*)0)
|
||||
|
||||
const int INITIAL_CODE_CACHE_CAPACITY = 1000;
|
||||
const int MAX_NATIVE_LIBS = 2048;
|
||||
|
||||
|
||||
enum ImportId {
|
||||
im_dlopen,
|
||||
im_pthread_create,
|
||||
im_pthread_exit,
|
||||
im_pthread_setspecific,
|
||||
im_poll,
|
||||
im_malloc,
|
||||
im_calloc,
|
||||
im_realloc,
|
||||
im_free,
|
||||
im_posix_memalign,
|
||||
im_aligned_alloc,
|
||||
NUM_IMPORTS
|
||||
};
|
||||
|
||||
enum ImportType {
|
||||
PRIMARY,
|
||||
SECONDARY,
|
||||
NUM_IMPORT_TYPES
|
||||
};
|
||||
|
||||
enum Mark {
|
||||
MARK_VM_RUNTIME = 1,
|
||||
MARK_INTERPRETER = 2,
|
||||
MARK_COMPILER_ENTRY = 3,
|
||||
MARK_ASYNC_PROFILER = 4, // async-profiler internals such as native hooks.
|
||||
};
|
||||
|
||||
|
||||
class NativeFunc {
|
||||
private:
|
||||
short _lib_index;
|
||||
char _mark;
|
||||
char _reserved;
|
||||
char _name[0];
|
||||
|
||||
static NativeFunc* from(const char* name) {
|
||||
return (NativeFunc*)(name - sizeof(NativeFunc));
|
||||
}
|
||||
|
||||
public:
|
||||
static char* create(const char* name, short lib_index);
|
||||
static void destroy(char* name);
|
||||
|
||||
static size_t usedMemory(const char* name);
|
||||
|
||||
static short libIndex(const char* name) {
|
||||
return from(name)->_lib_index;
|
||||
}
|
||||
|
||||
static char mark(const char* name) {
|
||||
return from(name)->_mark;
|
||||
}
|
||||
|
||||
static void mark(const char* name, char value) {
|
||||
from(name)->_mark = value;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class CodeBlob {
|
||||
public:
|
||||
const void* _start;
|
||||
const void* _end;
|
||||
char* _name;
|
||||
jmethodID _method;
|
||||
|
||||
static int comparator(const void* c1, const void* c2) {
|
||||
CodeBlob* cb1 = (CodeBlob*)c1;
|
||||
@@ -98,135 +48,60 @@ class CodeBlob {
|
||||
};
|
||||
|
||||
|
||||
class FrameDesc;
|
||||
|
||||
class CodeCache {
|
||||
private:
|
||||
char* _name;
|
||||
short _lib_index;
|
||||
const void* _min_address;
|
||||
const void* _max_address;
|
||||
const char* _text_base;
|
||||
|
||||
unsigned int _plt_offset;
|
||||
unsigned int _plt_size;
|
||||
|
||||
void** _imports[NUM_IMPORTS][NUM_IMPORT_TYPES];
|
||||
bool _imports_patchable;
|
||||
bool _debug_symbols;
|
||||
|
||||
FrameDesc* _dwarf_table;
|
||||
int _dwarf_table_length;
|
||||
|
||||
protected:
|
||||
int _capacity;
|
||||
int _count;
|
||||
CodeBlob* _blobs;
|
||||
const void* _min_address;
|
||||
const void* _max_address;
|
||||
|
||||
void expand();
|
||||
void makeImportsPatchable();
|
||||
void saveImport(ImportId id, void** entry);
|
||||
|
||||
public:
|
||||
CodeCache(const char* name,
|
||||
short lib_index = -1,
|
||||
bool imports_patchable = false,
|
||||
const void* min_address = NO_MIN_ADDRESS,
|
||||
const void* max_address = NO_MAX_ADDRESS);
|
||||
|
||||
~CodeCache();
|
||||
|
||||
const char* name() const {
|
||||
return _name;
|
||||
CodeCache() {
|
||||
_capacity = INITIAL_CODE_CACHE_CAPACITY;
|
||||
_count = 0;
|
||||
_blobs = new CodeBlob[_capacity];
|
||||
_min_address = NO_MIN_ADDRESS;
|
||||
_max_address = NO_MAX_ADDRESS;
|
||||
}
|
||||
|
||||
const void* minAddress() const {
|
||||
return _min_address;
|
||||
~CodeCache() {
|
||||
delete[] _blobs;
|
||||
}
|
||||
|
||||
const void* maxAddress() const {
|
||||
return _max_address;
|
||||
}
|
||||
|
||||
bool contains(const void* address) const {
|
||||
bool contains(const void* address) {
|
||||
return address >= _min_address && address < _max_address;
|
||||
}
|
||||
|
||||
void setTextBase(const char* text_base) {
|
||||
_text_base = text_base;
|
||||
}
|
||||
void add(const void* start, int length, jmethodID method, bool update_bounds = false);
|
||||
void remove(const void* start, jmethodID method);
|
||||
jmethodID find(const void* address);
|
||||
};
|
||||
|
||||
void setPlt(unsigned int plt_offset, unsigned int plt_size) {
|
||||
_plt_offset = plt_offset;
|
||||
_plt_size = plt_size;
|
||||
}
|
||||
|
||||
bool hasDebugSymbols() const {
|
||||
return _debug_symbols;
|
||||
}
|
||||
class NativeCodeCache : public CodeCache {
|
||||
private:
|
||||
char* _name;
|
||||
|
||||
void setDebugSymbols(bool debug_symbols) {
|
||||
_debug_symbols = debug_symbols;
|
||||
public:
|
||||
NativeCodeCache(const char* name,
|
||||
const void* min_address = NO_MIN_ADDRESS,
|
||||
const void* max_address = NO_MAX_ADDRESS);
|
||||
|
||||
~NativeCodeCache();
|
||||
|
||||
const char* name() {
|
||||
return _name;
|
||||
}
|
||||
|
||||
void add(const void* start, int length, const char* name, bool update_bounds = false);
|
||||
void updateBounds(const void* start, const void* end);
|
||||
void sort();
|
||||
|
||||
template <typename NamePredicate>
|
||||
inline void mark(NamePredicate predicate, char value) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
if (blob_name != NULL && predicate(blob_name)) {
|
||||
NativeFunc::mark(blob_name, value);
|
||||
}
|
||||
}
|
||||
|
||||
if (value == MARK_VM_RUNTIME && _name != NULL) {
|
||||
// In case a library has no debug symbols
|
||||
NativeFunc::mark(_name, value);
|
||||
}
|
||||
}
|
||||
|
||||
void addImport(void** entry, const char* name);
|
||||
void** findImport(ImportId id);
|
||||
void patchImport(ImportId, void* hook_func);
|
||||
|
||||
CodeBlob* findBlob(const char* name);
|
||||
CodeBlob* findBlobByAddress(const void* address);
|
||||
const char* binarySearch(const void* address);
|
||||
const void* findSymbol(const char* name);
|
||||
const void* findSymbolByPrefix(const char* prefix);
|
||||
const void* findSymbolByPrefix(const char* prefix, int prefix_len);
|
||||
|
||||
void setDwarfTable(FrameDesc* table, int length);
|
||||
FrameDesc* findFrameDesc(const void* pc);
|
||||
|
||||
size_t usedMemory();
|
||||
};
|
||||
|
||||
|
||||
class CodeCacheArray {
|
||||
private:
|
||||
CodeCache* _libs[MAX_NATIVE_LIBS];
|
||||
int _count;
|
||||
|
||||
public:
|
||||
CodeCacheArray() : _count(0) {
|
||||
}
|
||||
|
||||
CodeCache* operator[](int index) {
|
||||
return _libs[index];
|
||||
}
|
||||
|
||||
int count() {
|
||||
return __atomic_load_n(&_count, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
void add(CodeCache* lib) {
|
||||
int index = __atomic_load_n(&_count, __ATOMIC_ACQUIRE);
|
||||
_libs[index] = lib;
|
||||
__atomic_store_n(&_count, index + 1, __ATOMIC_RELEASE);
|
||||
}
|
||||
};
|
||||
|
||||
#endif // _CODECACHE_H
|
||||
|
||||
442
src/converter/FlameGraph.java
Normal file
@@ -0,0 +1,442 @@
|
||||
/*
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.BufferedOutputStream;
|
||||
import java.io.BufferedReader;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.PrintStream;
|
||||
import java.io.Reader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
public class FlameGraph {
|
||||
public String title = "Flame Graph";
|
||||
public boolean reverse;
|
||||
public double minwidth;
|
||||
public int skip;
|
||||
public String input;
|
||||
public String output;
|
||||
|
||||
private final Frame root = new Frame();
|
||||
private int depth;
|
||||
private long mintotal;
|
||||
|
||||
public FlameGraph(String... args) {
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
String arg = args[i];
|
||||
if (!arg.startsWith("--") && !arg.isEmpty()) {
|
||||
if (input == null) {
|
||||
input = arg;
|
||||
} else {
|
||||
output = arg;
|
||||
}
|
||||
} else if (arg.equals("--title")) {
|
||||
title = args[++i];
|
||||
} else if (arg.equals("--reverse")) {
|
||||
reverse = true;
|
||||
} else if (arg.equals("--minwidth")) {
|
||||
minwidth = Double.parseDouble(args[++i]);
|
||||
} else if (arg.equals("--skip")) {
|
||||
skip = Integer.parseInt(args[++i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void parse() throws IOException {
|
||||
parse(new InputStreamReader(new FileInputStream(input), StandardCharsets.UTF_8));
|
||||
}
|
||||
|
||||
public void parse(Reader in) throws IOException {
|
||||
try (BufferedReader br = new BufferedReader(in)) {
|
||||
for (String line; (line = br.readLine()) != null; ) {
|
||||
int space = line.lastIndexOf(' ');
|
||||
if (space <= 0) continue;
|
||||
|
||||
String[] trace = line.substring(0, space).split(";");
|
||||
long ticks = Long.parseLong(line.substring(space + 1));
|
||||
addSample(trace, ticks);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void addSample(String[] trace, long ticks) {
|
||||
Frame frame = root;
|
||||
if (reverse) {
|
||||
for (int i = trace.length; --i >= skip; ) {
|
||||
frame.total += ticks;
|
||||
frame = frame.child(trace[i]);
|
||||
}
|
||||
} else {
|
||||
for (int i = skip; i < trace.length; i++) {
|
||||
frame.total += ticks;
|
||||
frame = frame.child(trace[i]);
|
||||
}
|
||||
}
|
||||
frame.total += ticks;
|
||||
frame.self += ticks;
|
||||
|
||||
depth = Math.max(depth, trace.length);
|
||||
}
|
||||
|
||||
public void dump() throws IOException {
|
||||
if (output == null) {
|
||||
dump(System.out);
|
||||
} else {
|
||||
try (BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(output), 32768);
|
||||
PrintStream out = new PrintStream(bos, false, "UTF-8")) {
|
||||
dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void dump(PrintStream out) {
|
||||
out.print(applyReplacements(HEADER,
|
||||
"{title}", title,
|
||||
"{height}", (depth + 1) * 16,
|
||||
"{depth}", depth + 1,
|
||||
"{reverse}", reverse));
|
||||
|
||||
mintotal = (long) (root.total * minwidth / 100);
|
||||
printFrame(out, "all", root, 0, 0);
|
||||
|
||||
out.print(FOOTER);
|
||||
}
|
||||
|
||||
// Replace ${variables} in the given string with field values
|
||||
private String applyReplacements(String s, Object... params) {
|
||||
StringBuilder result = new StringBuilder(s.length() + 256);
|
||||
|
||||
int p = 0;
|
||||
for (int q; (q = s.indexOf('$', p)) >= 0; ) {
|
||||
result.append(s, p, q);
|
||||
p = s.indexOf('}', q + 2) + 1;
|
||||
String var = s.substring(q + 1, p);
|
||||
for (int i = 0; i < params.length; i += 2) {
|
||||
if (var.equals(params[i])) {
|
||||
result.append(params[i + 1]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result.append(s, p, s.length());
|
||||
return result.toString();
|
||||
}
|
||||
|
||||
private void printFrame(PrintStream out, String title, Frame frame, int level, long x) {
|
||||
int type = frameType(title);
|
||||
title = stripSuffix(title);
|
||||
if (title.indexOf('\'') >= 0) {
|
||||
title = title.replace("'", "\\'");
|
||||
}
|
||||
|
||||
out.println("f(" + level + "," + x + "," + frame.total + "," + type + ",'" + title + "')");
|
||||
|
||||
x += frame.self;
|
||||
for (Map.Entry<String, Frame> e : frame.entrySet()) {
|
||||
Frame child = e.getValue();
|
||||
if (child.total >= mintotal) {
|
||||
printFrame(out, e.getKey(), child, level + 1, x);
|
||||
}
|
||||
x += child.total;
|
||||
}
|
||||
}
|
||||
|
||||
private String stripSuffix(String title) {
|
||||
int len = title.length();
|
||||
if (len >= 4 && title.charAt(len - 1) == ']' && title.regionMatches(len - 4, "_[", 0, 2)) {
|
||||
return title.substring(0, len - 4);
|
||||
}
|
||||
return title;
|
||||
}
|
||||
|
||||
private int frameType(String title) {
|
||||
if (title.endsWith("_[j]")) {
|
||||
return 0;
|
||||
} else if (title.endsWith("_[i]")) {
|
||||
return 1;
|
||||
} else if (title.endsWith("_[k]")) {
|
||||
return 2;
|
||||
} else if (title.contains("::") || title.startsWith("-[") || title.startsWith("+[")) {
|
||||
return 3;
|
||||
} else if (title.indexOf('/') > 0 || title.indexOf('.') > 0 && Character.isUpperCase(title.charAt(0))) {
|
||||
return 0;
|
||||
} else {
|
||||
return 4;
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws IOException {
|
||||
FlameGraph fg = new FlameGraph(args);
|
||||
if (fg.input == null) {
|
||||
System.out.println("Usage: java " + FlameGraph.class.getName() + " [options] input.collapsed [output.html]");
|
||||
System.out.println();
|
||||
System.out.println("Options:");
|
||||
System.out.println(" --title TITLE");
|
||||
System.out.println(" --reverse");
|
||||
System.out.println(" --minwidth PERCENT");
|
||||
System.out.println(" --skip FRAMES");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
fg.parse();
|
||||
fg.dump();
|
||||
}
|
||||
|
||||
static class Frame extends TreeMap<String, Frame> {
|
||||
long total;
|
||||
long self;
|
||||
|
||||
Frame child(String title) {
|
||||
Frame child = get(title);
|
||||
if (child == null) {
|
||||
put(title, child = new Frame());
|
||||
}
|
||||
return child;
|
||||
}
|
||||
}
|
||||
|
||||
private static final String HEADER = "<!DOCTYPE html>\n" +
|
||||
"<html lang='en'>\n" +
|
||||
"<head>\n" +
|
||||
"<meta charset='utf-8'>\n" +
|
||||
"<style>\n" +
|
||||
"\tbody {margin: 0; padding: 10px; background-color: #ffffff}\n" +
|
||||
"\th1 {margin: 5px 0 0 0; font-size: 18px; font-weight: normal; text-align: center}\n" +
|
||||
"\theader {margin: -24px 0 5px 0; line-height: 24px}\n" +
|
||||
"\tbutton {font: 12px sans-serif; cursor: pointer}\n" +
|
||||
"\tp {margin: 5px 0 5px 0}\n" +
|
||||
"\ta {color: #0366d6}\n" +
|
||||
"\t#hl {position: absolute; display: none; overflow: hidden; white-space: nowrap; pointer-events: none; background-color: #ffffe0; outline: 1px solid #ffc000; height: 15px}\n" +
|
||||
"\t#hl span {padding: 0 3px 0 3px}\n" +
|
||||
"\t#status {overflow: hidden; white-space: nowrap}\n" +
|
||||
"\t#match {overflow: hidden; white-space: nowrap; display: none; float: right; text-align: right}\n" +
|
||||
"\t#reset {cursor: pointer}\n" +
|
||||
"</style>\n" +
|
||||
"</head>\n" +
|
||||
"<body style='font: 12px Verdana, sans-serif'>\n" +
|
||||
"<h1>${title}</h1>\n" +
|
||||
"<header style='text-align: left'><button id='reverse' title='Reverse'>🔻</button> <button id='search' title='Search'>🔍</button></header>\n" +
|
||||
"<header style='text-align: right'>Produced by <a href='https://github.com/jvm-profiling-tools/async-profiler'>async-profiler</a></header>\n" +
|
||||
"<canvas id='canvas' style='width: 100%; height: ${height}px'></canvas>\n" +
|
||||
"<div id='hl'><span></span></div>\n" +
|
||||
"<p id='match'>Matched: <span id='matchval'></span> <span id='reset' title='Clear'>❌</span></p>\n" +
|
||||
"<p id='status'> </p>\n" +
|
||||
"<script>\n" +
|
||||
"\t// Copyright 2020 Andrei Pangin\n" +
|
||||
"\t// Licensed under the Apache License, Version 2.0.\n" +
|
||||
"\t'use strict';\n" +
|
||||
"\tvar root, rootLevel, px, pattern;\n" +
|
||||
"\tvar reverse = ${reverse};\n" +
|
||||
"\tconst levels = Array(${depth});\n" +
|
||||
"\tfor (let h = 0; h < levels.length; h++) {\n" +
|
||||
"\t\tlevels[h] = [];\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tconst canvas = document.getElementById('canvas');\n" +
|
||||
"\tconst c = canvas.getContext('2d');\n" +
|
||||
"\tconst hl = document.getElementById('hl');\n" +
|
||||
"\tconst status = document.getElementById('status');\n" +
|
||||
"\n" +
|
||||
"\tconst canvasWidth = canvas.offsetWidth;\n" +
|
||||
"\tconst canvasHeight = canvas.offsetHeight;\n" +
|
||||
"\tcanvas.style.width = canvasWidth + 'px';\n" +
|
||||
"\tcanvas.width = canvasWidth * (devicePixelRatio || 1);\n" +
|
||||
"\tcanvas.height = canvasHeight * (devicePixelRatio || 1);\n" +
|
||||
"\tif (devicePixelRatio) c.scale(devicePixelRatio, devicePixelRatio);\n" +
|
||||
"\tc.font = document.body.style.font;\n" +
|
||||
"\n" +
|
||||
"\tconst palette = [\n" +
|
||||
"\t\t[0x50e150, 30, 30, 30],\n" +
|
||||
"\t\t[0x50bebe, 30, 30, 30],\n" +
|
||||
"\t\t[0xe17d00, 30, 30, 0],\n" +
|
||||
"\t\t[0xc8c83c, 30, 30, 10],\n" +
|
||||
"\t\t[0xe15a5a, 30, 40, 40],\n" +
|
||||
"\t];\n" +
|
||||
"\n" +
|
||||
"\tfunction getColor(p) {\n" +
|
||||
"\t\tconst v = Math.random();\n" +
|
||||
"\t\treturn '#' + (p[0] + ((p[1] * v) << 16 | (p[2] * v) << 8 | (p[3] * v))).toString(16);\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction f(level, left, width, type, title) {\n" +
|
||||
"\t\tlevels[level].push({left: left, width: width, color: getColor(palette[type]), title: title});\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction samples(n) {\n" +
|
||||
"\t\treturn n === 1 ? '1 sample' : n.toString().replace(/\\B(?=(\\d{3})+(?!\\d))/g, ',') + ' samples';\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction pct(a, b) {\n" +
|
||||
"\t\treturn a >= b ? '100' : (100 * a / b).toFixed(2);\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction findFrame(frames, x) {\n" +
|
||||
"\t\tlet left = 0;\n" +
|
||||
"\t\tlet right = frames.length - 1;\n" +
|
||||
"\n" +
|
||||
"\t\twhile (left <= right) {\n" +
|
||||
"\t\t\tconst mid = (left + right) >>> 1;\n" +
|
||||
"\t\t\tconst f = frames[mid];\n" +
|
||||
"\n" +
|
||||
"\t\t\tif (f.left > x) {\n" +
|
||||
"\t\t\t\tright = mid - 1;\n" +
|
||||
"\t\t\t} else if (f.left + f.width <= x) {\n" +
|
||||
"\t\t\t\tleft = mid + 1;\n" +
|
||||
"\t\t\t} else {\n" +
|
||||
"\t\t\t\treturn f;\n" +
|
||||
"\t\t\t}\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\tif (frames[left] && (frames[left].left - x) * px < 0.5) return frames[left];\n" +
|
||||
"\t\tif (frames[right] && (x - (frames[right].left + frames[right].width)) * px < 0.5) return frames[right];\n" +
|
||||
"\n" +
|
||||
"\t\treturn null;\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction search(r) {\n" +
|
||||
"\t\tif (r && (r = prompt('Enter regexp to search:', '')) === null) {\n" +
|
||||
"\t\t\treturn;\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\tpattern = r ? RegExp(r) : undefined;\n" +
|
||||
"\t\tconst matched = render(root, rootLevel);\n" +
|
||||
"\t\tdocument.getElementById('matchval').textContent = pct(matched, root.width) + '%';\n" +
|
||||
"\t\tdocument.getElementById('match').style.display = r ? 'inherit' : 'none';\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tfunction render(newRoot, newLevel) {\n" +
|
||||
"\t\tif (root) {\n" +
|
||||
"\t\t\tc.fillStyle = '#ffffff';\n" +
|
||||
"\t\t\tc.fillRect(0, 0, canvasWidth, canvasHeight);\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\troot = newRoot || levels[0][0];\n" +
|
||||
"\t\trootLevel = newLevel || 0;\n" +
|
||||
"\t\tpx = canvasWidth / root.width;\n" +
|
||||
"\n" +
|
||||
"\t\tconst x0 = root.left;\n" +
|
||||
"\t\tconst x1 = x0 + root.width;\n" +
|
||||
"\t\tconst marked = [];\n" +
|
||||
"\n" +
|
||||
"\t\tfunction mark(f) {\n" +
|
||||
"\t\t\treturn marked[f.left] >= f.width || (marked[f.left] = f.width);\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\tfunction totalMarked() {\n" +
|
||||
"\t\t\tlet total = 0;\n" +
|
||||
"\t\t\tlet left = 0;\n" +
|
||||
"\t\t\tfor (let x in marked) {\n" +
|
||||
"\t\t\t\tif (+x >= left) {\n" +
|
||||
"\t\t\t\t\ttotal += marked[x];\n" +
|
||||
"\t\t\t\t\tleft = +x + marked[x];\n" +
|
||||
"\t\t\t\t}\n" +
|
||||
"\t\t\t}\n" +
|
||||
"\t\t\treturn total;\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\tfunction drawFrame(f, y, alpha) {\n" +
|
||||
"\t\t\tif (f.left < x1 && f.left + f.width > x0) {\n" +
|
||||
"\t\t\t\tc.fillStyle = pattern && f.title.match(pattern) && mark(f) ? '#ee00ee' : f.color;\n" +
|
||||
"\t\t\t\tc.fillRect((f.left - x0) * px, y, f.width * px, 15);\n" +
|
||||
"\n" +
|
||||
"\t\t\t\tif (f.width * px >= 21) {\n" +
|
||||
"\t\t\t\t\tconst chars = Math.floor(f.width * px / 7);\n" +
|
||||
"\t\t\t\t\tconst title = f.title.length <= chars ? f.title : f.title.substring(0, chars - 2) + '..';\n" +
|
||||
"\t\t\t\t\tc.fillStyle = '#000000';\n" +
|
||||
"\t\t\t\t\tc.fillText(title, Math.max(f.left - x0, 0) * px + 3, y + 12, f.width * px - 6);\n" +
|
||||
"\t\t\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\t\t\tif (alpha) {\n" +
|
||||
"\t\t\t\t\tc.fillStyle = 'rgba(255, 255, 255, 0.5)';\n" +
|
||||
"\t\t\t\t\tc.fillRect((f.left - x0) * px, y, f.width * px, 15);\n" +
|
||||
"\t\t\t\t}\n" +
|
||||
"\t\t\t}\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\tfor (let h = 0; h < levels.length; h++) {\n" +
|
||||
"\t\t\tconst y = reverse ? h * 16 : canvasHeight - (h + 1) * 16;\n" +
|
||||
"\t\t\tconst frames = levels[h];\n" +
|
||||
"\t\t\tfor (let i = 0; i < frames.length; i++) {\n" +
|
||||
"\t\t\t\tdrawFrame(frames[i], y, h < rootLevel);\n" +
|
||||
"\t\t\t}\n" +
|
||||
"\t\t}\n" +
|
||||
"\n" +
|
||||
"\t\treturn totalMarked();\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tcanvas.onmousemove = function() {\n" +
|
||||
"\t\tconst h = Math.floor((reverse ? event.offsetY : (canvasHeight - event.offsetY)) / 16);\n" +
|
||||
"\t\tif (h >= 0 && h < levels.length) {\n" +
|
||||
"\t\t\tconst f = findFrame(levels[h], event.offsetX / px + root.left);\n" +
|
||||
"\t\t\tif (f) {\n" +
|
||||
"\t\t\t\thl.style.left = (Math.max(f.left - root.left, 0) * px + canvas.offsetLeft) + 'px';\n" +
|
||||
"\t\t\t\thl.style.width = (Math.min(f.width, root.width) * px) + 'px';\n" +
|
||||
"\t\t\t\thl.style.top = ((reverse ? h * 16 : canvasHeight - (h + 1) * 16) + canvas.offsetTop) + 'px';\n" +
|
||||
"\t\t\t\thl.firstChild.textContent = f.title;\n" +
|
||||
"\t\t\t\thl.style.display = 'block';\n" +
|
||||
"\t\t\t\tcanvas.title = f.title + '\\n(' + samples(f.width) + ', ' + pct(f.width, levels[0][0].width) + '%)';\n" +
|
||||
"\t\t\t\tcanvas.style.cursor = 'pointer';\n" +
|
||||
"\t\t\t\tcanvas.onclick = function() {\n" +
|
||||
"\t\t\t\t\tif (f != root) {\n" +
|
||||
"\t\t\t\t\t\trender(f, h);\n" +
|
||||
"\t\t\t\t\t\tcanvas.onmousemove();\n" +
|
||||
"\t\t\t\t\t}\n" +
|
||||
"\t\t\t\t};\n" +
|
||||
"\t\t\t\tstatus.textContent = 'Function: ' + canvas.title;\n" +
|
||||
"\t\t\t\treturn;\n" +
|
||||
"\t\t\t}\n" +
|
||||
"\t\t}\n" +
|
||||
"\t\tcanvas.onmouseout();\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tcanvas.onmouseout = function() {\n" +
|
||||
"\t\thl.style.display = 'none';\n" +
|
||||
"\t\tstatus.textContent = '\\xa0';\n" +
|
||||
"\t\tcanvas.title = '';\n" +
|
||||
"\t\tcanvas.style.cursor = '';\n" +
|
||||
"\t\tcanvas.onclick = '';\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tdocument.getElementById('reverse').onclick = function() {\n" +
|
||||
"\t\treverse = !reverse;\n" +
|
||||
"\t\trender();\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tdocument.getElementById('search').onclick = function() {\n" +
|
||||
"\t\tsearch(true);\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\tdocument.getElementById('reset').onclick = function() {\n" +
|
||||
"\t\tsearch(false);\n" +
|
||||
"\t}\n" +
|
||||
"\n" +
|
||||
"\twindow.onkeydown = function() {\n" +
|
||||
"\t\tif (event.ctrlKey && event.keyCode === 70) {\n" +
|
||||
"\t\t\tevent.preventDefault();\n" +
|
||||
"\t\t\tsearch(true);\n" +
|
||||
"\t\t} else if (event.keyCode === 27) {\n" +
|
||||
"\t\t\tsearch(false);\n" +
|
||||
"\t\t}\n" +
|
||||
"\t}\n";
|
||||
|
||||
private static final String FOOTER = "render();\n" +
|
||||
"</script></body></html>\n";
|
||||
}
|
||||
1
src/converter/MANIFEST.MF
Normal file
@@ -0,0 +1 @@
|
||||
Main-Class: Main
|
||||
@@ -1,130 +1,31 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import one.convert.*;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Main entry point of jar.
|
||||
* Lists available converters.
|
||||
*/
|
||||
public class Main {
|
||||
|
||||
public static void main(String[] argv) throws Exception {
|
||||
Arguments args = new Arguments(argv);
|
||||
if (args.help || args.files.isEmpty()) {
|
||||
usage();
|
||||
return;
|
||||
}
|
||||
|
||||
if (args.files.size() == 1) {
|
||||
args.files.add(".");
|
||||
}
|
||||
|
||||
int fileCount = args.files.size() - 1;
|
||||
String lastFile = args.files.get(fileCount);
|
||||
boolean isDirectory = new File(lastFile).isDirectory();
|
||||
|
||||
if (args.output == null) {
|
||||
int ext;
|
||||
if (!isDirectory && (ext = lastFile.lastIndexOf('.')) > 0) {
|
||||
args.output = lastFile.substring(ext + 1);
|
||||
} else {
|
||||
args.output = "html";
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < fileCount; i++) {
|
||||
String input = args.files.get(i);
|
||||
String output = isDirectory ? new File(lastFile, replaceExt(input, args.output)).getPath() : lastFile;
|
||||
|
||||
System.out.print("Converting " + getFileName(input) + " -> " + getFileName(output) + " ");
|
||||
System.out.flush();
|
||||
|
||||
long startTime = System.nanoTime();
|
||||
convert(input, output, args);
|
||||
long endTime = System.nanoTime();
|
||||
|
||||
System.out.print("# " + (endTime - startTime) / 1000000 / 1000.0 + " s\n");
|
||||
}
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
if (isJfr(input)) {
|
||||
if ("html".equals(args.output) || "collapsed".equals(args.output)) {
|
||||
JfrToFlame.convert(input, output, args);
|
||||
} else if ("pprof".equals(args.output) || "pb".equals(args.output) || args.output.endsWith("gz")) {
|
||||
JfrToPprof.convert(input, output, args);
|
||||
} else if ("heatmap".equals(args.output)) {
|
||||
JfrToHeatmap.convert(input, output, args);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Unrecognized output format: " + args.output);
|
||||
}
|
||||
} else {
|
||||
FlameGraph.convert(input, output, args);
|
||||
}
|
||||
}
|
||||
|
||||
private static String getFileName(String fileName) {
|
||||
return fileName.substring(fileName.lastIndexOf(File.separatorChar) + 1);
|
||||
}
|
||||
|
||||
private static String replaceExt(String fileName, String output) {
|
||||
String ext = "heatmap".equals(output) ? "html" : output;
|
||||
int slash = fileName.lastIndexOf(File.separatorChar);
|
||||
int dot = fileName.lastIndexOf('.');
|
||||
return dot > slash ? fileName.substring(slash + 1, dot + 1) + ext : fileName.substring(slash + 1) + '.' + ext;
|
||||
}
|
||||
|
||||
private static boolean isJfr(String fileName) throws IOException {
|
||||
if (fileName.endsWith(".jfr")) {
|
||||
return true;
|
||||
} else if (fileName.endsWith(".collapsed") || fileName.endsWith(".txt") || fileName.endsWith(".csv")) {
|
||||
return false;
|
||||
}
|
||||
byte[] buf = new byte[4];
|
||||
try (FileInputStream fis = new FileInputStream(fileName)) {
|
||||
return fis.read(buf) == 4 && buf[0] == 'F' && buf[1] == 'L' && buf[2] == 'R' && buf[3] == 0;
|
||||
}
|
||||
}
|
||||
|
||||
private static void usage() {
|
||||
System.out.print("Usage: jfrconv [options] <input> [<input>...] <output>\n" +
|
||||
"\n" +
|
||||
"Conversion options:\n" +
|
||||
" -o --output FORMAT Output format: html, collapsed, pprof, pb.gz, heatmap\n" +
|
||||
"\n" +
|
||||
"JFR options:\n" +
|
||||
" --cpu CPU profile\n" +
|
||||
" --wall Wall clock profile\n" +
|
||||
" --alloc Allocation profile\n" +
|
||||
" --live Live object profile\n" +
|
||||
" --nativemem malloc profile\n" +
|
||||
" --leak Only include memory leaks in nativemem\n" +
|
||||
" --lock Lock contention profile\n" +
|
||||
" -t --threads Split stack traces by threads\n" +
|
||||
" -s --state LIST Filter thread states: runnable, sleeping\n" +
|
||||
" --classify Classify samples into predefined categories\n" +
|
||||
" --total Accumulate total value (time, bytes, etc.)\n" +
|
||||
" --lines Show line numbers\n" +
|
||||
" --bci Show bytecode indices\n" +
|
||||
" --simple Simple class names instead of FQN\n" +
|
||||
" --norm Normalize names of hidden classes / lambdas\n" +
|
||||
" --dot Dotted class names\n" +
|
||||
" --from TIME Start time in ms (absolute or relative)\n" +
|
||||
" --to TIME End time in ms (absolute or relative)\n" +
|
||||
"\n" +
|
||||
"Flame Graph options:\n" +
|
||||
" --title STRING Flame Graph title\n" +
|
||||
" --minwidth X Skip frames smaller than X%\n" +
|
||||
" --grain X Coarsen Flame Graph to the given grain size\n" +
|
||||
" --skip N Skip N bottom frames\n" +
|
||||
" -r --reverse Reverse stack traces (defaults to icicle graph)\n" +
|
||||
" -i --inverted Toggles the layout for reversed stacktraces from icicle to flamegraph\n" +
|
||||
" and for default stacktraces from flamegraph to icicle\n" +
|
||||
" -I --include REGEX Include only stacks with the specified frames\n" +
|
||||
" -X --exclude REGEX Exclude stacks with the specified frames\n" +
|
||||
" --highlight REGEX Highlight frames matching the given pattern\n");
|
||||
public static void main(String[] args) {
|
||||
System.out.println("Usage: java -cp converter.jar <Converter> [options] <input> <output>");
|
||||
System.out.println();
|
||||
System.out.println("Available converters:");
|
||||
System.out.println(" FlameGraph input.collapsed output.html");
|
||||
System.out.println(" jfr2flame input.jfr output.html");
|
||||
System.out.println(" jfr2nflx input.jfr output.nflx");
|
||||
}
|
||||
}
|
||||
|
||||
92
src/converter/jfr2flame.java
Normal file
@@ -0,0 +1,92 @@
|
||||
/*
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import one.jfr.ClassRef;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.MethodRef;
|
||||
import one.jfr.StackTrace;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
/**
|
||||
* Converts .jfr output produced by async-profiler to HTML Flame Graph.
|
||||
*/
|
||||
public class jfr2flame {
|
||||
|
||||
private static final int FRAME_KERNEL = 5;
|
||||
|
||||
private final JfrReader jfr;
|
||||
private final Dictionary<String> methodNames = new Dictionary<>();
|
||||
|
||||
public jfr2flame(JfrReader jfr) {
|
||||
this.jfr = jfr;
|
||||
}
|
||||
|
||||
public void convert(final FlameGraph fg) {
|
||||
// Don't use lambda for faster startup
|
||||
jfr.stackTraces.forEach(new Dictionary.Visitor<StackTrace>() {
|
||||
@Override
|
||||
public void visit(long id, StackTrace stackTrace) {
|
||||
long[] methods = stackTrace.methods;
|
||||
byte[] types = stackTrace.types;
|
||||
String[] trace = new String[methods.length];
|
||||
for (int i = 0; i < methods.length; i++) {
|
||||
trace[trace.length - 1 - i] = getMethodName(methods[i], types[i]);
|
||||
}
|
||||
fg.addSample(trace, stackTrace.samples);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private String getMethodName(long methodId, int type) {
|
||||
String result = methodNames.get(methodId);
|
||||
if (result != null) {
|
||||
return result;
|
||||
}
|
||||
|
||||
MethodRef method = jfr.methods.get(methodId);
|
||||
ClassRef cls = jfr.classes.get(method.cls);
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
byte[] methodName = jfr.symbols.get(method.name);
|
||||
|
||||
if (className == null || className.length == 0) {
|
||||
String methodStr = new String(methodName, StandardCharsets.UTF_8);
|
||||
result = type == FRAME_KERNEL ? methodStr + "_[k]" : methodStr;
|
||||
} else {
|
||||
String classStr = new String(className, StandardCharsets.UTF_8);
|
||||
String methodStr = new String(methodName, StandardCharsets.UTF_8);
|
||||
result = classStr + '.' + methodStr + "_[j]";
|
||||
}
|
||||
|
||||
methodNames.put(methodId, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
FlameGraph fg = new FlameGraph(args);
|
||||
if (fg.input == null) {
|
||||
System.out.println("Usage: java " + jfr2flame.class.getName() + " [options] input.jfr [output.html]");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
try (JfrReader jfr = new JfrReader(fg.input)) {
|
||||
new jfr2flame(jfr).convert(fg);
|
||||
}
|
||||
|
||||
fg.dump();
|
||||
}
|
||||
}
|
||||
161
src/converter/jfr2nflx.java
Normal file
@@ -0,0 +1,161 @@
|
||||
/*
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import one.jfr.ClassRef;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.MethodRef;
|
||||
import one.jfr.Sample;
|
||||
import one.jfr.StackTrace;
|
||||
import one.proto.Proto;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
* Converts .jfr output produced by async-profiler to nflxprofile format
|
||||
* as described in https://github.com/Netflix/nflxprofile/blob/master/nflxprofile.proto.
|
||||
* The result nflxprofile can be opened and analyzed with FlameScope.
|
||||
*/
|
||||
public class jfr2nflx {
|
||||
|
||||
private static final String[] FRAME_TYPE = {"jit", "jit", "inlined", "user", "user", "kernel"};
|
||||
private static final byte[] NO_STACK = "[no_stack]".getBytes();
|
||||
|
||||
private final JfrReader jfr;
|
||||
|
||||
public jfr2nflx(JfrReader jfr) {
|
||||
this.jfr = jfr;
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
long startTime = System.nanoTime();
|
||||
|
||||
int samples = jfr.samples.size();
|
||||
long durationTicks = samples == 0 ? 0 : jfr.samples.get(samples - 1).time - jfr.startTicks + 1;
|
||||
|
||||
final Proto profile = new Proto(200000)
|
||||
.field(1, 0.0)
|
||||
.field(2, Math.max(jfr.durationNanos / 1e9, durationTicks / (double) jfr.ticksPerSec))
|
||||
.field(3, packSamples())
|
||||
.field(4, packDeltas())
|
||||
.field(6, "async-profiler")
|
||||
.field(8, new Proto(32).field(1, "has_node_stack").field(2, "true"))
|
||||
.field(8, new Proto(32).field(1, "has_samples_tid").field(2, "true"))
|
||||
.field(11, packTids());
|
||||
|
||||
final Proto nodes = new Proto(10000);
|
||||
final Proto node = new Proto(10000);
|
||||
|
||||
// Don't use lambda for faster startup
|
||||
jfr.stackTraces.forEach(new Dictionary.Visitor<StackTrace>() {
|
||||
@Override
|
||||
public void visit(long id, StackTrace stackTrace) {
|
||||
profile.field(5, nodes
|
||||
.field(1, (int) id)
|
||||
.field(2, packNode(node, stackTrace)));
|
||||
nodes.reset();
|
||||
node.reset();
|
||||
}
|
||||
});
|
||||
|
||||
out.write(profile.buffer(), 0, profile.size());
|
||||
|
||||
long endTime = System.nanoTime();
|
||||
System.out.println("Wrote " + profile.size() + " bytes in " + (endTime - startTime) / 1e9 + " s");
|
||||
}
|
||||
|
||||
private Proto packNode(Proto node, StackTrace stackTrace) {
|
||||
long[] methods = stackTrace.methods;
|
||||
byte[] types = stackTrace.types;
|
||||
int top = methods.length - 1;
|
||||
|
||||
node.field(1, top >= 0 ? getMethodName(methods[top]) : NO_STACK);
|
||||
node.field(2, 1);
|
||||
node.field(4, top >= 0 ? FRAME_TYPE[types[top]] : "user");
|
||||
|
||||
for (Proto frame = new Proto(100); --top >= 0; frame.reset()) {
|
||||
node.field(10, frame
|
||||
.field(1, getMethodName(methods[top]))
|
||||
.field(2, FRAME_TYPE[types[top]]));
|
||||
}
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
private Proto packSamples() {
|
||||
Proto proto = new Proto(10000);
|
||||
for (Sample sample : jfr.samples) {
|
||||
proto.writeInt(sample.stackTraceId);
|
||||
}
|
||||
return proto;
|
||||
}
|
||||
|
||||
private Proto packDeltas() {
|
||||
Proto proto = new Proto(10000);
|
||||
double ticksPerSec = jfr.ticksPerSec;
|
||||
long prevTime = jfr.startTicks;
|
||||
for (Sample sample : jfr.samples) {
|
||||
proto.writeDouble((sample.time - prevTime) / ticksPerSec);
|
||||
prevTime = sample.time;
|
||||
}
|
||||
return proto;
|
||||
}
|
||||
|
||||
private Proto packTids() {
|
||||
Proto proto = new Proto(10000);
|
||||
for (Sample sample : jfr.samples) {
|
||||
proto.writeInt(sample.tid);
|
||||
}
|
||||
return proto;
|
||||
}
|
||||
|
||||
private byte[] getMethodName(long methodId) {
|
||||
MethodRef method = jfr.methods.get(methodId);
|
||||
ClassRef cls = jfr.classes.get(method.cls);
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
byte[] methodName = jfr.symbols.get(method.name);
|
||||
|
||||
if (className == null || className.length == 0) {
|
||||
return methodName;
|
||||
} else {
|
||||
byte[] fullName = Arrays.copyOf(className, className.length + 1 + methodName.length);
|
||||
fullName[className.length] = '.';
|
||||
System.arraycopy(methodName, 0, fullName, className.length + 1, methodName.length);
|
||||
return fullName;
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
if (args.length < 2) {
|
||||
System.out.println("Usage: java " + jfr2nflx.class.getName() + " input.jfr output.nflx");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
File dst = new File(args[1]);
|
||||
if (dst.isDirectory()) {
|
||||
dst = new File(dst, new File(args[0]).getName().replace(".jfr", ".nflx"));
|
||||
}
|
||||
|
||||
try (JfrReader jfr = new JfrReader(args[0]);
|
||||
FileOutputStream out = new FileOutputStream(dst)) {
|
||||
new jfr2nflx(jfr).dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,128 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.*;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
public class Arguments {
|
||||
public String title = "Flame Graph";
|
||||
public String highlight;
|
||||
public String output;
|
||||
public String state;
|
||||
public Pattern include;
|
||||
public Pattern exclude;
|
||||
public double minwidth;
|
||||
public double grain;
|
||||
public int skip;
|
||||
public boolean help;
|
||||
public boolean reverse;
|
||||
public boolean inverted;
|
||||
public boolean cpu;
|
||||
public boolean wall;
|
||||
public boolean alloc;
|
||||
public boolean nativemem;
|
||||
public boolean leak;
|
||||
public boolean live;
|
||||
public boolean lock;
|
||||
public boolean threads;
|
||||
public boolean classify;
|
||||
public boolean total;
|
||||
public boolean lines;
|
||||
public boolean bci;
|
||||
public boolean simple;
|
||||
public boolean norm;
|
||||
public boolean dot;
|
||||
public long from;
|
||||
public long to;
|
||||
public final List<String> files = new ArrayList<>();
|
||||
|
||||
public Arguments(String... args) {
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
String arg = args[i];
|
||||
String fieldName;
|
||||
if (arg.startsWith("--")) {
|
||||
fieldName = arg.substring(2);
|
||||
} else if (arg.startsWith("-") && arg.length() == 2) {
|
||||
fieldName = alias(arg.charAt(1));
|
||||
} else {
|
||||
files.add(arg);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
Field f = Arguments.class.getDeclaredField(fieldName);
|
||||
if ((f.getModifiers() & (Modifier.PRIVATE | Modifier.STATIC | Modifier.FINAL)) != 0) {
|
||||
throw new IllegalArgumentException(arg);
|
||||
}
|
||||
|
||||
Class<?> type = f.getType();
|
||||
if (type == String.class) {
|
||||
f.set(this, args[++i]);
|
||||
} else if (type == boolean.class) {
|
||||
f.setBoolean(this, true);
|
||||
} else if (type == int.class) {
|
||||
f.setInt(this, Integer.parseInt(args[++i]));
|
||||
} else if (type == double.class) {
|
||||
f.setDouble(this, Double.parseDouble(args[++i]));
|
||||
} else if (type == long.class) {
|
||||
f.setLong(this, parseTimestamp(args[++i]));
|
||||
} else if (type == Pattern.class) {
|
||||
f.set(this, Pattern.compile(args[++i]));
|
||||
}
|
||||
} catch (NoSuchFieldException | IllegalAccessException e) {
|
||||
throw new IllegalArgumentException(arg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String alias(char c) {
|
||||
switch (c) {
|
||||
case 'h':
|
||||
return "help";
|
||||
case 'o':
|
||||
return "output";
|
||||
case 'r':
|
||||
return "reverse";
|
||||
case 'i':
|
||||
return "inverted";
|
||||
case 'I':
|
||||
return "include";
|
||||
case 'X':
|
||||
return "exclude";
|
||||
case 't':
|
||||
return "threads";
|
||||
case 's':
|
||||
return "state";
|
||||
default:
|
||||
return String.valueOf(c);
|
||||
}
|
||||
}
|
||||
|
||||
// Milliseconds or HH:mm:ss.S or yyyy-MM-dd'T'HH:mm:ss.S
|
||||
private static long parseTimestamp(String time) {
|
||||
if (time.indexOf(':') < 0) {
|
||||
return Long.parseLong(time);
|
||||
}
|
||||
|
||||
GregorianCalendar cal = new GregorianCalendar();
|
||||
StringTokenizer st = new StringTokenizer(time, "-:.T");
|
||||
|
||||
if (time.indexOf('T') > 0) {
|
||||
cal.set(Calendar.YEAR, Integer.parseInt(st.nextToken()));
|
||||
cal.set(Calendar.MONTH, Integer.parseInt(st.nextToken()) - 1);
|
||||
cal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(st.nextToken()));
|
||||
}
|
||||
cal.set(Calendar.HOUR_OF_DAY, st.hasMoreTokens() ? Integer.parseInt(st.nextToken()) : 0);
|
||||
cal.set(Calendar.MINUTE, st.hasMoreTokens() ? Integer.parseInt(st.nextToken()) : 0);
|
||||
cal.set(Calendar.SECOND, st.hasMoreTokens() ? Integer.parseInt(st.nextToken()) : 0);
|
||||
cal.set(Calendar.MILLISECOND, st.hasMoreTokens() ? Integer.parseInt(st.nextToken()) : 0);
|
||||
|
||||
return cal.getTimeInMillis();
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
public class CallStack {
|
||||
String[] names = new String[16];
|
||||
byte[] types = new byte[16];
|
||||
int size;
|
||||
|
||||
public void push(String name, byte type) {
|
||||
if (size >= names.length) {
|
||||
names = Arrays.copyOf(names, size * 2);
|
||||
types = Arrays.copyOf(types, size * 2);
|
||||
}
|
||||
names[size] = name;
|
||||
types[size] = type;
|
||||
size++;
|
||||
}
|
||||
|
||||
public void pop() {
|
||||
size--;
|
||||
}
|
||||
|
||||
public void clear() {
|
||||
size = 0;
|
||||
}
|
||||
}
|
||||
@@ -1,146 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.jfr.StackTrace;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
|
||||
abstract class Classifier {
|
||||
|
||||
enum Category {
|
||||
GC("[gc]", TYPE_CPP),
|
||||
JIT("[jit]", TYPE_CPP),
|
||||
VM("[vm]", TYPE_CPP),
|
||||
VTABLE_STUBS("[vtable_stubs]", TYPE_NATIVE),
|
||||
NATIVE("[native]", TYPE_NATIVE),
|
||||
INTERPRETER("[Interpreter]", TYPE_NATIVE),
|
||||
C1_COMP("[c1_comp]", TYPE_C1_COMPILED),
|
||||
C2_COMP("[c2_comp]", TYPE_INLINED),
|
||||
ADAPTER("[c2i_adapter]", TYPE_INLINED),
|
||||
CLASS_INIT("[class_init]", TYPE_CPP),
|
||||
CLASS_LOAD("[class_load]", TYPE_CPP),
|
||||
CLASS_RESOLVE("[class_resolve]", TYPE_CPP),
|
||||
CLASS_VERIFY("[class_verify]", TYPE_CPP),
|
||||
LAMBDA_INIT("[lambda_init]", TYPE_CPP);
|
||||
|
||||
final String title;
|
||||
final byte type;
|
||||
|
||||
Category(String title, byte type) {
|
||||
this.title = title;
|
||||
this.type = type;
|
||||
}
|
||||
}
|
||||
|
||||
public Category getCategory(StackTrace stackTrace) {
|
||||
long[] methods = stackTrace.methods;
|
||||
byte[] types = stackTrace.types;
|
||||
|
||||
Category category;
|
||||
if ((category = detectGcJit(methods, types)) == null &&
|
||||
(category = detectClassLoading(methods, types)) == null) {
|
||||
category = detectOther(methods, types);
|
||||
}
|
||||
return category;
|
||||
}
|
||||
|
||||
private Category detectGcJit(long[] methods, byte[] types) {
|
||||
boolean vmThread = false;
|
||||
for (int i = types.length; --i >= 0; ) {
|
||||
if (types[i] == TYPE_CPP) {
|
||||
switch (getMethodName(methods[i], types[i])) {
|
||||
case "CompileBroker::compiler_thread_loop":
|
||||
return Category.JIT;
|
||||
case "GCTaskThread::run":
|
||||
case "WorkerThread::run":
|
||||
return Category.GC;
|
||||
case "java_start":
|
||||
case "thread_native_entry":
|
||||
vmThread = true;
|
||||
break;
|
||||
}
|
||||
} else if (types[i] != TYPE_NATIVE) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return vmThread ? Category.VM : null;
|
||||
}
|
||||
|
||||
private Category detectClassLoading(long[] methods, byte[] types) {
|
||||
for (int i = 0; i < methods.length; i++) {
|
||||
String methodName = getMethodName(methods[i], types[i]);
|
||||
if (methodName.equals("Verifier::verify")) {
|
||||
return Category.CLASS_VERIFY;
|
||||
} else if (methodName.startsWith("InstanceKlass::initialize")) {
|
||||
return Category.CLASS_INIT;
|
||||
} else if (methodName.startsWith("LinkResolver::") ||
|
||||
methodName.startsWith("InterpreterRuntime::resolve") ||
|
||||
methodName.startsWith("SystemDictionary::resolve")) {
|
||||
return Category.CLASS_RESOLVE;
|
||||
} else if (methodName.endsWith("ClassLoader.loadClass")) {
|
||||
return Category.CLASS_LOAD;
|
||||
} else if (methodName.endsWith("LambdaMetafactory.metafactory") ||
|
||||
methodName.endsWith("LambdaMetafactory.altMetafactory")) {
|
||||
return Category.LAMBDA_INIT;
|
||||
} else if (methodName.endsWith("table stub")) {
|
||||
return Category.VTABLE_STUBS;
|
||||
} else if (methodName.equals("Interpreter")) {
|
||||
return Category.INTERPRETER;
|
||||
} else if (methodName.startsWith("I2C/C2I")) {
|
||||
return i + 1 < types.length && types[i + 1] == TYPE_INTERPRETED ? Category.INTERPRETER : Category.ADAPTER;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private Category detectOther(long[] methods, byte[] types) {
|
||||
boolean inJava = true;
|
||||
for (int i = 0; i < types.length; i++) {
|
||||
switch (types[i]) {
|
||||
case TYPE_INTERPRETED:
|
||||
return inJava ? Category.INTERPRETER : Category.NATIVE;
|
||||
case TYPE_JIT_COMPILED:
|
||||
return inJava ? Category.C2_COMP : Category.NATIVE;
|
||||
case TYPE_INLINED:
|
||||
inJava = true;
|
||||
break;
|
||||
case TYPE_NATIVE: {
|
||||
String methodName = getMethodName(methods[i], types[i]);
|
||||
if (methodName.startsWith("JVM_") || methodName.startsWith("Unsafe_") ||
|
||||
methodName.startsWith("MHN_") || methodName.startsWith("jni_")) {
|
||||
return Category.VM;
|
||||
}
|
||||
switch (methodName) {
|
||||
case "call_stub":
|
||||
case "deoptimization":
|
||||
case "unknown_Java":
|
||||
case "not_walkable_Java":
|
||||
case "InlineCacheBuffer":
|
||||
return Category.VM;
|
||||
}
|
||||
if (methodName.endsWith("_arraycopy") || methodName.contains("pthread_cond")) {
|
||||
break;
|
||||
}
|
||||
inJava = false;
|
||||
break;
|
||||
}
|
||||
case TYPE_CPP: {
|
||||
String methodName = getMethodName(methods[i], types[i]);
|
||||
if (methodName.startsWith("Runtime1::")) {
|
||||
return Category.C1_COMP;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TYPE_C1_COMPILED:
|
||||
return inJava ? Category.C1_COMP : Category.NATIVE;
|
||||
}
|
||||
}
|
||||
return Category.NATIVE;
|
||||
}
|
||||
|
||||
protected abstract String getMethodName(long method, byte type);
|
||||
}
|
||||
@@ -1,406 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.StringTokenizer;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
import static one.convert.ResourceProcessor.*;
|
||||
|
||||
public class FlameGraph implements Comparator<Frame> {
|
||||
private static final Frame[] EMPTY_FRAME_ARRAY = {};
|
||||
private static final String[] FRAME_SUFFIX = {"_[0]", "_[j]", "_[i]", "", "", "_[k]", "_[1]"};
|
||||
private static final byte HAS_SUFFIX = (byte) 0x80;
|
||||
private static final int FLUSH_THRESHOLD = 15000;
|
||||
private static final Pattern TID_FRAME_PATTERN = Pattern.compile("\\[(.* )?tid=\\d+]");
|
||||
|
||||
private final Arguments args;
|
||||
private final Index<String> cpool = new Index<>(String.class, "");
|
||||
private final Frame root = new Frame(0, TYPE_NATIVE);
|
||||
private final StringBuilder outbuf = new StringBuilder(FLUSH_THRESHOLD + 1000);
|
||||
private int[] order;
|
||||
private int depth;
|
||||
private int lastLevel;
|
||||
private long lastX;
|
||||
private long lastTotal;
|
||||
private long mintotal;
|
||||
|
||||
public FlameGraph(Arguments args) {
|
||||
this.args = args;
|
||||
}
|
||||
|
||||
public void parseCollapsed(Reader in) throws IOException {
|
||||
CallStack stack = new CallStack();
|
||||
|
||||
try (BufferedReader br = new BufferedReader(in)) {
|
||||
for (String line; (line = br.readLine()) != null; ) {
|
||||
int space = line.lastIndexOf(' ');
|
||||
if (space <= 0) continue;
|
||||
|
||||
long ticks = Long.parseLong(line.substring(space + 1));
|
||||
|
||||
for (int from = 0, to; from < space; from = to + 1) {
|
||||
if ((to = line.indexOf(';', from)) < 0) to = space;
|
||||
String name = line.substring(from, to);
|
||||
byte type = detectType(name);
|
||||
if ((type & HAS_SUFFIX) != 0) {
|
||||
name = name.substring(0, name.length() - 4);
|
||||
type ^= HAS_SUFFIX;
|
||||
}
|
||||
stack.push(name, type);
|
||||
}
|
||||
|
||||
addSample(stack, ticks);
|
||||
stack.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void parseHtml(Reader in) throws IOException {
|
||||
Frame[] levels = new Frame[128];
|
||||
int level = 0;
|
||||
long total = 0;
|
||||
boolean needRebuild = args.reverse || args.include != null || args.exclude != null;
|
||||
|
||||
try (BufferedReader br = new BufferedReader(in)) {
|
||||
while (!br.readLine().startsWith("const cpool")) ;
|
||||
br.readLine();
|
||||
|
||||
String s = "";
|
||||
for (String line; (line = br.readLine()).startsWith("'"); ) {
|
||||
String packed = unescape(line.substring(1, line.lastIndexOf('\'')));
|
||||
s = s.substring(0, packed.charAt(0) - ' ').concat(packed.substring(1));
|
||||
cpool.put(s, cpool.size());
|
||||
}
|
||||
|
||||
while (!br.readLine().isEmpty()) ;
|
||||
|
||||
for (String line; !(line = br.readLine()).isEmpty(); ) {
|
||||
StringTokenizer st = new StringTokenizer(line.substring(2, line.length() - 1), ",");
|
||||
int nameAndType = Integer.parseInt(st.nextToken());
|
||||
|
||||
char func = line.charAt(0);
|
||||
if (func == 'f') {
|
||||
level = Integer.parseInt(st.nextToken());
|
||||
st.nextToken();
|
||||
} else if (func == 'u') {
|
||||
level++;
|
||||
} else if (func != 'n') {
|
||||
throw new IllegalStateException("Unexpected line: " + line);
|
||||
}
|
||||
|
||||
if (st.hasMoreTokens()) {
|
||||
total = Long.parseLong(st.nextToken());
|
||||
}
|
||||
|
||||
int titleIndex = nameAndType >>> 3;
|
||||
byte type = (byte) (nameAndType & 7);
|
||||
if (st.hasMoreTokens() && (type <= TYPE_INLINED || type >= TYPE_C1_COMPILED)) {
|
||||
type = TYPE_JIT_COMPILED;
|
||||
}
|
||||
|
||||
Frame f = level > 0 || needRebuild ? new Frame(titleIndex, type) : root;
|
||||
f.self = f.total = total;
|
||||
if (st.hasMoreTokens()) f.inlined = Long.parseLong(st.nextToken());
|
||||
if (st.hasMoreTokens()) f.c1 = Long.parseLong(st.nextToken());
|
||||
if (st.hasMoreTokens()) f.interpreted = Long.parseLong(st.nextToken());
|
||||
|
||||
if (level > 0) {
|
||||
Frame parent = levels[level - 1];
|
||||
parent.put(f.key, f);
|
||||
parent.self -= total;
|
||||
depth = Math.max(depth, level);
|
||||
}
|
||||
if (level >= levels.length) {
|
||||
levels = Arrays.copyOf(levels, level * 2);
|
||||
}
|
||||
levels[level] = f;
|
||||
}
|
||||
}
|
||||
|
||||
if (needRebuild) {
|
||||
rebuild(levels[0], new CallStack(), cpool.keys());
|
||||
}
|
||||
}
|
||||
|
||||
private void rebuild(Frame frame, CallStack stack, String[] strings) {
|
||||
if (frame.self > 0) {
|
||||
addSample(stack, frame.self);
|
||||
}
|
||||
if (!frame.isEmpty()) {
|
||||
for (Frame child : frame.values()) {
|
||||
stack.push(strings[child.getTitleIndex()], child.getType());
|
||||
rebuild(child, stack, strings);
|
||||
stack.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void addSample(CallStack stack, long ticks) {
|
||||
if (excludeStack(stack)) {
|
||||
return;
|
||||
}
|
||||
|
||||
Frame frame = root;
|
||||
if (args.reverse) {
|
||||
// Retain by-thread grouping, unless thread frame is skipped
|
||||
int skip = args.skip;
|
||||
if (skip == 0 && stack.size > 0 && isThreadFrame(stack.names[0], stack.types[0])) {
|
||||
frame = addChild(frame, stack.names[0], stack.types[0], ticks);
|
||||
skip = 1;
|
||||
}
|
||||
for (int i = stack.size; --i >= skip; ) {
|
||||
frame = addChild(frame, stack.names[i], stack.types[i], ticks);
|
||||
}
|
||||
} else {
|
||||
for (int i = args.skip; i < stack.size; i++) {
|
||||
frame = addChild(frame, stack.names[i], stack.types[i], ticks);
|
||||
}
|
||||
}
|
||||
frame.total += ticks;
|
||||
frame.self += ticks;
|
||||
|
||||
depth = Math.max(depth, stack.size);
|
||||
}
|
||||
|
||||
public void dump(PrintStream out) {
|
||||
mintotal = (long) (root.total * args.minwidth / 100);
|
||||
|
||||
if ("collapsed".equals(args.output)) {
|
||||
printFrameCollapsed(out, root, cpool.keys());
|
||||
return;
|
||||
}
|
||||
|
||||
String tail = getResource("/flame.html");
|
||||
|
||||
tail = printTill(out, tail, "/*height:*/300");
|
||||
int depth = mintotal > 1 ? root.depth(mintotal) : this.depth + 1;
|
||||
out.print(Math.min(depth * 16, 32767));
|
||||
|
||||
tail = printTill(out, tail, "/*title:*/");
|
||||
out.print(args.title);
|
||||
|
||||
// inverted toggles the layout for reversed stacktraces from icicle to flamegraph
|
||||
// and for default stacktraces from flamegraphs to icicle.
|
||||
tail = printTill(out, tail, "/*inverted:*/false");
|
||||
out.print(args.reverse ^ args.inverted);
|
||||
|
||||
tail = printTill(out, tail, "/*depth:*/0");
|
||||
out.print(depth);
|
||||
|
||||
tail = printTill(out, tail, "/*cpool:*/");
|
||||
printCpool(out);
|
||||
|
||||
tail = printTill(out, tail, "/*frames:*/");
|
||||
printFrame(out, root, 0, 0);
|
||||
out.print(outbuf);
|
||||
|
||||
tail = printTill(out, tail, "/*highlight:*/");
|
||||
out.print(args.highlight != null ? "'" + escape(args.highlight) + "'" : "");
|
||||
|
||||
out.print(tail);
|
||||
}
|
||||
|
||||
private void printCpool(PrintStream out) {
|
||||
String[] strings = cpool.keys();
|
||||
Arrays.sort(strings);
|
||||
out.print("'all'");
|
||||
|
||||
order = new int[strings.length];
|
||||
String s = "";
|
||||
for (int i = 1; i < strings.length; i++) {
|
||||
int prefixLen = Math.min(getCommonPrefix(s, s = strings[i]), 95);
|
||||
out.print(",\n'" + escape((char) (prefixLen + ' ') + s.substring(prefixLen)) + "'");
|
||||
order[cpool.get(s)] = i;
|
||||
}
|
||||
|
||||
// cpool is not used beyond this point
|
||||
cpool.clear();
|
||||
}
|
||||
|
||||
private void printFrame(PrintStream out, Frame frame, int level, long x) {
|
||||
int nameAndType = order[frame.getTitleIndex()] << 3 | frame.getType();
|
||||
boolean hasExtraTypes = (frame.inlined | frame.c1 | frame.interpreted) != 0 &&
|
||||
frame.inlined < frame.total && frame.interpreted < frame.total;
|
||||
|
||||
char func = 'f';
|
||||
if (level == lastLevel + 1 && x == lastX) {
|
||||
func = 'u';
|
||||
} else if (level == lastLevel && x == lastX + lastTotal) {
|
||||
func = 'n';
|
||||
}
|
||||
|
||||
StringBuilder sb = outbuf.append(func).append('(').append(nameAndType);
|
||||
if (func == 'f') {
|
||||
sb.append(',').append(level).append(',').append(x - lastX);
|
||||
}
|
||||
if (frame.total != lastTotal || hasExtraTypes) {
|
||||
sb.append(',').append(frame.total);
|
||||
if (hasExtraTypes) {
|
||||
sb.append(',').append(frame.inlined).append(',').append(frame.c1).append(',').append(frame.interpreted);
|
||||
}
|
||||
}
|
||||
sb.append(")\n");
|
||||
|
||||
if (sb.length() > FLUSH_THRESHOLD) {
|
||||
out.print(sb);
|
||||
sb.setLength(0);
|
||||
}
|
||||
|
||||
lastLevel = level;
|
||||
lastX = x;
|
||||
lastTotal = frame.total;
|
||||
|
||||
Frame[] children = frame.values().toArray(EMPTY_FRAME_ARRAY);
|
||||
Arrays.sort(children, this);
|
||||
|
||||
x += frame.self;
|
||||
for (Frame child : children) {
|
||||
if (child.total >= mintotal) {
|
||||
printFrame(out, child, level + 1, x);
|
||||
}
|
||||
x += child.total;
|
||||
}
|
||||
}
|
||||
|
||||
private void printFrameCollapsed(PrintStream out, Frame frame, String[] strings) {
|
||||
StringBuilder sb = outbuf;
|
||||
int prevLength = sb.length();
|
||||
|
||||
if (frame != root) {
|
||||
sb.append(strings[frame.getTitleIndex()]).append(FRAME_SUFFIX[frame.getType()]);
|
||||
if (frame.self > 0) {
|
||||
int tmpLength = sb.length();
|
||||
out.print(sb.append(' ').append(frame.self).append('\n'));
|
||||
sb.setLength(tmpLength);
|
||||
}
|
||||
sb.append(';');
|
||||
}
|
||||
|
||||
if (!frame.isEmpty()) {
|
||||
for (Frame child : frame.values()) {
|
||||
if (child.total >= mintotal) {
|
||||
printFrameCollapsed(out, child, strings);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sb.setLength(prevLength);
|
||||
}
|
||||
|
||||
private boolean excludeStack(CallStack stack) {
|
||||
Pattern include = args.include;
|
||||
Pattern exclude = args.exclude;
|
||||
if (include == null && exclude == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (int i = 0; i < stack.size; i++) {
|
||||
if (exclude != null && exclude.matcher(stack.names[i]).matches()) {
|
||||
return true;
|
||||
}
|
||||
if (include != null && include.matcher(stack.names[i]).matches()) {
|
||||
if (exclude == null) return false;
|
||||
include = null;
|
||||
}
|
||||
}
|
||||
|
||||
return include != null;
|
||||
}
|
||||
|
||||
private Frame addChild(Frame frame, String title, byte type, long ticks) {
|
||||
frame.total += ticks;
|
||||
|
||||
int titleIndex = cpool.index(title);
|
||||
|
||||
Frame child;
|
||||
switch (type) {
|
||||
case TYPE_INTERPRETED:
|
||||
(child = frame.getChild(titleIndex, TYPE_JIT_COMPILED)).interpreted += ticks;
|
||||
break;
|
||||
case TYPE_INLINED:
|
||||
(child = frame.getChild(titleIndex, TYPE_JIT_COMPILED)).inlined += ticks;
|
||||
break;
|
||||
case TYPE_C1_COMPILED:
|
||||
(child = frame.getChild(titleIndex, TYPE_JIT_COMPILED)).c1 += ticks;
|
||||
break;
|
||||
default:
|
||||
child = frame.getChild(titleIndex, type);
|
||||
}
|
||||
return child;
|
||||
}
|
||||
|
||||
private static byte detectType(String title) {
|
||||
if (title.endsWith("_[j]")) {
|
||||
return TYPE_JIT_COMPILED | HAS_SUFFIX;
|
||||
} else if (title.endsWith("_[i]")) {
|
||||
return TYPE_INLINED | HAS_SUFFIX;
|
||||
} else if (title.endsWith("_[k]")) {
|
||||
return TYPE_KERNEL | HAS_SUFFIX;
|
||||
} else if (title.endsWith("_[0]")) {
|
||||
return TYPE_INTERPRETED | HAS_SUFFIX;
|
||||
} else if (title.endsWith("_[1]")) {
|
||||
return TYPE_C1_COMPILED | HAS_SUFFIX;
|
||||
} else if (title.contains("::") || title.startsWith("-[") || title.startsWith("+[")) {
|
||||
return TYPE_CPP;
|
||||
} else if (title.indexOf('/') > 0 && title.charAt(0) != '['
|
||||
|| title.indexOf('.') > 0 && Character.isUpperCase(title.charAt(0))) {
|
||||
return TYPE_JIT_COMPILED;
|
||||
} else {
|
||||
return TYPE_NATIVE;
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean isThreadFrame(String name, byte type) {
|
||||
return type == TYPE_NATIVE && name.startsWith("[") && TID_FRAME_PATTERN.matcher(name).matches();
|
||||
}
|
||||
|
||||
private static int getCommonPrefix(String a, String b) {
|
||||
int length = Math.min(a.length(), b.length());
|
||||
for (int i = 0; i < length; i++) {
|
||||
if (a.charAt(i) != b.charAt(i) || a.charAt(i) > 127) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return length;
|
||||
}
|
||||
|
||||
private static String escape(String s) {
|
||||
if (s.indexOf('\\') >= 0) s = s.replace("\\", "\\\\");
|
||||
if (s.indexOf('\'') >= 0) s = s.replace("'", "\\'");
|
||||
return s;
|
||||
}
|
||||
|
||||
private static String unescape(String s) {
|
||||
if (s.indexOf('\'') >= 0) s = s.replace("\\'", "'");
|
||||
if (s.indexOf('\\') >= 0) s = s.replace("\\\\", "\\");
|
||||
return s;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compare(Frame f1, Frame f2) {
|
||||
return order[f1.getTitleIndex()] - order[f2.getTitleIndex()];
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
FlameGraph fg = new FlameGraph(args);
|
||||
try (InputStreamReader in = new InputStreamReader(new FileInputStream(input), StandardCharsets.UTF_8)) {
|
||||
if (input.endsWith(".html")) {
|
||||
fg.parseHtml(in);
|
||||
} else {
|
||||
fg.parseCollapsed(in);
|
||||
}
|
||||
}
|
||||
try (PrintStream out = new PrintStream(output, "UTF-8")) {
|
||||
fg.dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.util.HashMap;
|
||||
|
||||
public class Frame extends HashMap<Integer, Frame> {
|
||||
public static final byte TYPE_INTERPRETED = 0;
|
||||
public static final byte TYPE_JIT_COMPILED = 1;
|
||||
public static final byte TYPE_INLINED = 2;
|
||||
public static final byte TYPE_NATIVE = 3;
|
||||
public static final byte TYPE_CPP = 4;
|
||||
public static final byte TYPE_KERNEL = 5;
|
||||
public static final byte TYPE_C1_COMPILED = 6;
|
||||
|
||||
private static final int TYPE_SHIFT = 28;
|
||||
|
||||
final int key;
|
||||
long total;
|
||||
long self;
|
||||
long inlined, c1, interpreted;
|
||||
|
||||
private Frame(int key) {
|
||||
this.key = key;
|
||||
}
|
||||
|
||||
Frame(int titleIndex, byte type) {
|
||||
this(titleIndex | type << TYPE_SHIFT);
|
||||
}
|
||||
|
||||
Frame getChild(int titleIndex, byte type) {
|
||||
return super.computeIfAbsent(titleIndex | type << TYPE_SHIFT, Frame::new);
|
||||
}
|
||||
|
||||
int getTitleIndex() {
|
||||
return key & ((1 << TYPE_SHIFT) - 1);
|
||||
}
|
||||
|
||||
byte getType() {
|
||||
if (inlined * 3 >= total) {
|
||||
return TYPE_INLINED;
|
||||
} else if (c1 * 2 >= total) {
|
||||
return TYPE_C1_COMPILED;
|
||||
} else if (interpreted * 2 >= total) {
|
||||
return TYPE_INTERPRETED;
|
||||
} else {
|
||||
return (byte) (key >>> TYPE_SHIFT);
|
||||
}
|
||||
}
|
||||
|
||||
int depth(long cutoff) {
|
||||
int depth = 0;
|
||||
if (size() > 0) {
|
||||
for (Frame child : values()) {
|
||||
if (child.total >= cutoff) {
|
||||
depth = Math.max(depth, child.depth(cutoff));
|
||||
}
|
||||
}
|
||||
}
|
||||
return depth + 1;
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.lang.reflect.Array;
|
||||
import java.util.HashMap;
|
||||
|
||||
public class Index<T> extends HashMap<T, Integer> {
|
||||
private final Class<T> cls;
|
||||
|
||||
public Index(Class<T> cls, T empty) {
|
||||
this(cls, empty, 256);
|
||||
}
|
||||
|
||||
public Index(Class<T> cls, T empty, int initialCapacity) {
|
||||
super(initialCapacity);
|
||||
this.cls = cls;
|
||||
super.put(empty, 0);
|
||||
}
|
||||
|
||||
public int index(T key) {
|
||||
Integer index = super.get(key);
|
||||
if (index != null) {
|
||||
return index;
|
||||
} else {
|
||||
int newIndex = super.size();
|
||||
super.put(key, newIndex);
|
||||
return newIndex;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public T[] keys() {
|
||||
T[] result = (T[]) Array.newInstance(cls, size());
|
||||
keys(result);
|
||||
return result;
|
||||
}
|
||||
|
||||
public void keys(T[] result) {
|
||||
for (Entry<T, Integer> entry : entrySet()) {
|
||||
result[entry.getValue()] = entry.getKey();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,275 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.jfr.ClassRef;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.MethodRef;
|
||||
import one.jfr.event.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.BitSet;
|
||||
import java.util.Map;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
|
||||
public abstract class JfrConverter extends Classifier {
|
||||
protected final JfrReader jfr;
|
||||
protected final Arguments args;
|
||||
protected final EventCollector collector;
|
||||
protected Dictionary<String> methodNames;
|
||||
|
||||
public JfrConverter(JfrReader jfr, Arguments args) {
|
||||
this.jfr = jfr;
|
||||
this.args = args;
|
||||
|
||||
EventCollector collector = createCollector(args);
|
||||
this.collector = args.nativemem && args.leak ? new MallocLeakAggregator(collector) : collector;
|
||||
}
|
||||
|
||||
public void convert() throws IOException {
|
||||
jfr.stopAtNewChunk = true;
|
||||
|
||||
while (jfr.hasMoreChunks()) {
|
||||
// Reset method dictionary, since new chunk may have different IDs
|
||||
methodNames = new Dictionary<>();
|
||||
|
||||
collector.beforeChunk();
|
||||
collectEvents();
|
||||
collector.afterChunk();
|
||||
|
||||
convertChunk();
|
||||
}
|
||||
|
||||
if (collector.finish()) {
|
||||
convertChunk();
|
||||
}
|
||||
}
|
||||
|
||||
protected EventCollector createCollector(Arguments args) {
|
||||
return new EventAggregator(args.threads, args.grain);
|
||||
}
|
||||
|
||||
protected void collectEvents() throws IOException {
|
||||
Class<? extends Event> eventClass = args.nativemem ? MallocEvent.class
|
||||
: args.live ? LiveObject.class
|
||||
: args.alloc ? AllocationSample.class
|
||||
: args.lock ? ContendedLock.class
|
||||
: ExecutionSample.class;
|
||||
|
||||
BitSet threadStates = null;
|
||||
if (args.state != null) {
|
||||
threadStates = new BitSet();
|
||||
for (String state : args.state.toUpperCase().split(",")) {
|
||||
threadStates.set(toThreadState(state));
|
||||
}
|
||||
} else if (args.cpu) {
|
||||
threadStates = getThreadStates(true);
|
||||
} else if (args.wall) {
|
||||
threadStates = getThreadStates(false);
|
||||
}
|
||||
|
||||
long startTicks = args.from != 0 ? toTicks(args.from) : Long.MIN_VALUE;
|
||||
long endTicks = args.to != 0 ? toTicks(args.to) : Long.MAX_VALUE;
|
||||
|
||||
for (Event event; (event = jfr.readEvent(eventClass)) != null; ) {
|
||||
if (event.time >= startTicks && event.time <= endTicks) {
|
||||
if (threadStates == null || threadStates.get(((ExecutionSample) event).threadState)) {
|
||||
collector.collect(event);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void convertChunk() {
|
||||
// To be overridden in subclasses
|
||||
}
|
||||
|
||||
protected int toThreadState(String name) {
|
||||
Map<Integer, String> threadStates = jfr.enums.get("jdk.types.ThreadState");
|
||||
if (threadStates != null) {
|
||||
for (Map.Entry<Integer, String> entry : threadStates.entrySet()) {
|
||||
if (entry.getValue().startsWith(name, 6)) {
|
||||
return entry.getKey();
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Unknown thread state: " + name);
|
||||
}
|
||||
|
||||
protected BitSet getThreadStates(boolean cpu) {
|
||||
BitSet set = new BitSet();
|
||||
Map<Integer, String> threadStates = jfr.enums.get("jdk.types.ThreadState");
|
||||
if (threadStates != null) {
|
||||
for (Map.Entry<Integer, String> entry : threadStates.entrySet()) {
|
||||
set.set(entry.getKey(), "STATE_DEFAULT".equals(entry.getValue()) == cpu);
|
||||
}
|
||||
}
|
||||
return set;
|
||||
}
|
||||
|
||||
// millis can be an absolute timestamp or an offset from the beginning/end of the recording
|
||||
protected long toTicks(long millis) {
|
||||
long nanos = millis * 1_000_000;
|
||||
if (millis < 0) {
|
||||
nanos += jfr.endNanos;
|
||||
} else if (millis < 1500000000000L) {
|
||||
nanos += jfr.startNanos;
|
||||
}
|
||||
return (long) ((nanos - jfr.chunkStartNanos) * (jfr.ticksPerSec / 1e9)) + jfr.chunkStartTicks;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getMethodName(long methodId, byte methodType) {
|
||||
String result = methodNames.get(methodId);
|
||||
if (result == null) {
|
||||
methodNames.put(methodId, result = resolveMethodName(methodId, methodType));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private String resolveMethodName(long methodId, byte methodType) {
|
||||
MethodRef method = jfr.methods.get(methodId);
|
||||
if (method == null) {
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
ClassRef cls = jfr.classes.get(method.cls);
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
byte[] methodName = jfr.symbols.get(method.name);
|
||||
|
||||
if (className == null || className.length == 0 || isNativeFrame(methodType)) {
|
||||
return new String(methodName, StandardCharsets.UTF_8);
|
||||
} else {
|
||||
String classStr = toJavaClassName(className, 0, args.dot);
|
||||
if (methodName == null || methodName.length == 0) {
|
||||
return classStr;
|
||||
}
|
||||
String methodStr = new String(methodName, StandardCharsets.UTF_8);
|
||||
return classStr + '.' + methodStr;
|
||||
}
|
||||
}
|
||||
|
||||
public String getClassName(long classId) {
|
||||
ClassRef cls = jfr.classes.get(classId);
|
||||
if (cls == null) {
|
||||
return "null";
|
||||
}
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
|
||||
int arrayDepth = 0;
|
||||
while (className[arrayDepth] == '[') {
|
||||
arrayDepth++;
|
||||
}
|
||||
|
||||
String name = toJavaClassName(className, arrayDepth, true);
|
||||
while (arrayDepth-- > 0) {
|
||||
name = name.concat("[]");
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
private String toJavaClassName(byte[] symbol, int start, boolean dotted) {
|
||||
int end = symbol.length;
|
||||
if (start > 0) {
|
||||
switch (symbol[start]) {
|
||||
case 'B':
|
||||
return "byte";
|
||||
case 'C':
|
||||
return "char";
|
||||
case 'S':
|
||||
return "short";
|
||||
case 'I':
|
||||
return "int";
|
||||
case 'J':
|
||||
return "long";
|
||||
case 'Z':
|
||||
return "boolean";
|
||||
case 'F':
|
||||
return "float";
|
||||
case 'D':
|
||||
return "double";
|
||||
case 'L':
|
||||
start++;
|
||||
end--;
|
||||
}
|
||||
}
|
||||
|
||||
if (args.norm) {
|
||||
for (int i = end - 2; i > start; i--) {
|
||||
if (symbol[i] == '/' || symbol[i] == '.') {
|
||||
if (symbol[i + 1] >= '0' && symbol[i + 1] <= '9') {
|
||||
end = i;
|
||||
if (i > start + 19 && symbol[i - 19] == '+' && symbol[i - 18] == '0') {
|
||||
// Original JFR transforms lambda names to something like
|
||||
// pkg.ClassName$$Lambda+0x00007f8177090218/543846639
|
||||
end = i - 19;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (args.simple) {
|
||||
for (int i = end - 2; i >= start; i--) {
|
||||
if (symbol[i] == '/' && (symbol[i + 1] < '0' || symbol[i + 1] > '9')) {
|
||||
start = i + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
String s = new String(symbol, start, end - start, StandardCharsets.UTF_8);
|
||||
return dotted ? s.replace('/', '.') : s;
|
||||
}
|
||||
|
||||
public StackTraceElement getStackTraceElement(long methodId, byte methodType, int location) {
|
||||
MethodRef method = jfr.methods.get(methodId);
|
||||
if (method == null) {
|
||||
return new StackTraceElement("", "unknown", null, 0);
|
||||
}
|
||||
|
||||
ClassRef cls = jfr.classes.get(method.cls);
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
byte[] methodName = jfr.symbols.get(method.name);
|
||||
|
||||
String classStr = className == null || className.length == 0 || isNativeFrame(methodType) ? "" :
|
||||
toJavaClassName(className, 0, args.dot);
|
||||
String methodStr = methodName == null || methodName.length == 0 ? "" :
|
||||
new String(methodName, StandardCharsets.UTF_8);
|
||||
return new StackTraceElement(classStr, methodStr, null, location >>> 16);
|
||||
}
|
||||
|
||||
public String getThreadName(int tid) {
|
||||
String threadName = jfr.threads.get(tid);
|
||||
return threadName == null ? "[tid=" + tid + ']' :
|
||||
threadName.startsWith("[tid=") ? threadName : '[' + threadName + " tid=" + tid + ']';
|
||||
}
|
||||
|
||||
protected boolean isNativeFrame(byte methodType) {
|
||||
// In JDK Flight Recorder, TYPE_NATIVE denotes Java native methods,
|
||||
// while in async-profiler, TYPE_NATIVE is for C methods
|
||||
return methodType == TYPE_NATIVE && jfr.getEnumValue("jdk.types.FrameType", TYPE_KERNEL) != null ||
|
||||
methodType == TYPE_CPP ||
|
||||
methodType == TYPE_KERNEL;
|
||||
}
|
||||
|
||||
// Select sum(samples) or sum(value) depending on the --total option.
|
||||
// For lock events, convert lock duration from ticks to nanoseconds.
|
||||
protected abstract class AggregatedEventVisitor implements EventCollector.Visitor {
|
||||
final double factor = !args.total ? 0.0 : args.lock ? 1e9 / jfr.ticksPerSec : 1.0;
|
||||
|
||||
@Override
|
||||
public final void visit(Event event, long samples, long value) {
|
||||
visit(event, factor == 0.0 ? samples : factor == 1.0 ? value : (long) (value * factor));
|
||||
}
|
||||
|
||||
protected abstract void visit(Event event, long value);
|
||||
}
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.AllocationSample;
|
||||
import one.jfr.event.Event;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.PrintStream;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
|
||||
/**
 * Converts .jfr output to HTML Flame Graph.
 */
public class JfrToFlame extends JfrConverter {
    // Accumulates aggregated stack samples and renders the final HTML.
    private final FlameGraph fg;

    public JfrToFlame(JfrReader jfr, Arguments args) {
        super(jfr, args);
        this.fg = new FlameGraph(args);
    }

    /**
     * Feeds every aggregated event of the current JFR chunk into the flame graph.
     */
    @Override
    protected void convertChunk() {
        collector.forEach(new AggregatedEventVisitor() {
            // Reused across events to avoid a per-sample allocation.
            final CallStack stack = new CallStack();

            @Override
            public void visit(Event event, long value) {
                StackTrace stackTrace = jfr.stackTraces.get(event.stackTraceId);
                if (stackTrace != null) {
                    Arguments args = JfrToFlame.this.args;
                    long[] methods = stackTrace.methods;
                    byte[] types = stackTrace.types;
                    int[] locations = stackTrace.locations;

                    // Optional synthetic root frames: thread name and/or classified category.
                    if (args.threads) {
                        stack.push(getThreadName(event.tid), TYPE_NATIVE);
                    }
                    if (args.classify) {
                        Classifier.Category category = getCategory(stackTrace);
                        stack.push(category.title, category.type);
                    }
                    // Frames are iterated in reverse so the stack is pushed root-first.
                    for (int i = methods.length; --i >= 0; ) {
                        String methodName = getMethodName(methods[i], types[i]);
                        int location;
                        // locations[i] packs the line number in the high 16 bits
                        // and the bytecode index in the low 16 bits.
                        if (args.lines && (location = locations[i] >>> 16) != 0) {
                            methodName += ":" + location;
                        } else if (args.bci && (location = locations[i] & 0xffff) != 0) {
                            methodName += "@" + location;
                        }
                        stack.push(methodName, types[i]);
                    }
                    // For events carrying a class (allocation/lock samples), append the class
                    // as a leaf frame; outside-TLAB allocations (tlabSize == 0) are marked
                    // with TYPE_KERNEL so they get a distinct color.
                    long classId = event.classId();
                    if (classId != 0) {
                        stack.push(getClassName(classId), (event instanceof AllocationSample)
                                && ((AllocationSample) event).tlabSize == 0 ? TYPE_KERNEL : TYPE_INLINED);
                    }

                    fg.addSample(stack, value);
                    stack.clear();
                }
            }
        });
    }

    /**
     * Writes the rendered flame graph HTML to {@code out} using UTF-8.
     */
    public void dump(OutputStream out) throws IOException {
        try (PrintStream ps = new PrintStream(out, false, "UTF-8")) {
            fg.dump(ps);
        }
    }

    /**
     * Convenience entry point: reads the .jfr file {@code input},
     * writes the flame graph HTML to {@code output}.
     */
    public static void convert(String input, String output, Arguments args) throws IOException {
        JfrToFlame converter;
        try (JfrReader jfr = new JfrReader(input)) {
            converter = new JfrToFlame(jfr, args);
            converter.convert();
        }
        try (FileOutputStream out = new FileOutputStream(output)) {
            converter.dump(out);
        }
    }
}
|
||||
@@ -1,96 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.heatmap.Heatmap;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.AllocationSample;
|
||||
import one.jfr.event.ContendedLock;
|
||||
import one.jfr.event.Event;
|
||||
import one.jfr.event.EventCollector;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
import static one.convert.Frame.TYPE_INLINED;
|
||||
import static one.convert.Frame.TYPE_KERNEL;
|
||||
|
||||
/**
 * Converts .jfr output to an HTML heatmap.
 */
public class JfrToHeatmap extends JfrConverter {
    private final Heatmap heatmap;

    public JfrToHeatmap(JfrReader jfr, Arguments args) {
        super(jfr, args);
        this.heatmap = new Heatmap(args, this);
    }

    /**
     * Streams events straight into the heatmap instead of aggregating them:
     * the returned collector forwards each event (with its wall-clock millisecond
     * timestamp) to {@link Heatmap#addEvent} and feeds chunk stack traces to
     * {@link Heatmap#addStack}.
     */
    @Override
    protected EventCollector createCollector(Arguments args) {
        return new EventCollector() {
            @Override
            public void collect(Event event) {
                // `extra` carries the sampled class id for allocation/lock events;
                // `type` distinguishes outside-TLAB allocations and locks.
                int extra = 0;
                byte type = 0;
                if (event instanceof AllocationSample) {
                    extra = ((AllocationSample) event).classId;
                    type = ((AllocationSample) event).tlabSize == 0 ? TYPE_KERNEL : TYPE_INLINED;
                } else if (event instanceof ContendedLock) {
                    extra = ((ContendedLock) event).classId;
                    type = TYPE_KERNEL;
                }

                // Convert the chunk-relative tick timestamp to absolute milliseconds.
                long msFromStart = (event.time - jfr.chunkStartTicks) * 1_000 / jfr.ticksPerSec;
                long timeMs = jfr.chunkStartNanos / 1_000_000 + msFromStart;

                heatmap.addEvent(event.stackTraceId, extra, type, timeMs);
            }

            @Override
            public void beforeChunk() {
                heatmap.beforeChunk();
                // Register every stack trace of the new chunk before its events arrive.
                jfr.stackTraces.forEach(new Dictionary.Visitor<StackTrace>() {
                    @Override
                    public void visit(long key, StackTrace trace) {
                        heatmap.addStack(key, trace.methods, trace.locations, trace.types, trace.methods.length);
                    }
                });
            }

            @Override
            public void afterChunk() {
                // Stack traces are chunk-scoped; free them once the chunk is processed.
                jfr.stackTraces.clear();
            }

            @Override
            public boolean finish() {
                heatmap.finish(jfr.startNanos / 1_000_000);
                // No second pass is needed.
                return false;
            }

            @Override
            public void forEach(Visitor visitor) {
                // This collector never aggregates, so iteration is unsupported.
                throw new AssertionError("Should not be called");
            }
        };
    }

    /**
     * Writes the rendered heatmap HTML to {@code out} using UTF-8.
     */
    public void dump(OutputStream out) throws IOException {
        try (PrintStream ps = new PrintStream(out, false, "UTF-8")) {
            heatmap.dump(ps);
        }
    }

    /**
     * Convenience entry point: reads the .jfr file {@code input},
     * writes the heatmap HTML to {@code output}.
     */
    public static void convert(String input, String output, Arguments args) throws IOException {
        JfrToHeatmap converter;
        try (JfrReader jfr = new JfrReader(input)) {
            converter = new JfrToHeatmap(jfr, args);
            converter.convert();
        }
        try (OutputStream out = new BufferedOutputStream(new FileOutputStream(output))) {
            converter.dump(out);
        }
    }
}
|
||||
@@ -1,165 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.Event;
|
||||
import one.proto.Proto;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
/**
|
||||
* Converts .jfr output to <a href="https://github.com/google/pprof">pprof</a>.
|
||||
*/
|
||||
public class JfrToPprof extends JfrConverter {
|
||||
private final Proto profile = new Proto(100000);
|
||||
private final Index<String> strings = new Index<>(String.class, "");
|
||||
private final Index<String> functions = new Index<>(String.class, "");
|
||||
private final Index<Long> locations = new Index<>(Long.class, 0L);
|
||||
|
||||
public JfrToPprof(JfrReader jfr, Arguments args) {
|
||||
super(jfr, args);
|
||||
|
||||
Proto sampleType;
|
||||
if (args.nativemem) {
|
||||
sampleType = valueType("malloc", args.total ? "bytes" : "count");
|
||||
} else if (args.alloc || args.live) {
|
||||
sampleType = valueType("allocations", args.total ? "bytes" : "count");
|
||||
} else if (args.lock) {
|
||||
sampleType = valueType("locks", args.total ? "nanoseconds" : "count");
|
||||
} else {
|
||||
sampleType = valueType("cpu", args.total ? "nanoseconds" : "count");
|
||||
}
|
||||
|
||||
profile.field(1, sampleType)
|
||||
.field(13, strings.index("Produced by async-profiler"));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void convertChunk() {
|
||||
collector.forEach(new AggregatedEventVisitor() {
|
||||
final Proto s = new Proto(100);
|
||||
|
||||
@Override
|
||||
public void visit(Event event, long value) {
|
||||
profile.field(2, sample(s, event, value));
|
||||
s.reset();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
profile.field(3, mapping(1, 0, Long.MAX_VALUE, "async-profiler"));
|
||||
|
||||
Long[] locations = this.locations.keys();
|
||||
for (int i = 1; i < locations.length; i++) {
|
||||
profile.field(4, location(i, locations[i]));
|
||||
}
|
||||
|
||||
String[] functions = this.functions.keys();
|
||||
for (int i = 1; i < functions.length; i++) {
|
||||
profile.field(5, function(i, functions[i]));
|
||||
}
|
||||
|
||||
String[] strings = this.strings.keys();
|
||||
for (String string : strings) {
|
||||
profile.field(6, string);
|
||||
}
|
||||
|
||||
profile.field(9, jfr.startNanos)
|
||||
.field(10, jfr.durationNanos());
|
||||
|
||||
out.write(profile.buffer(), 0, profile.size());
|
||||
}
|
||||
|
||||
private Proto sample(Proto s, Event event, long value) {
|
||||
int packedLocations = s.startField(1);
|
||||
|
||||
long classId = event.classId();
|
||||
if (classId != 0) {
|
||||
int function = functions.index(getClassName(classId));
|
||||
s.writeInt(locations.index((long) function << 16));
|
||||
}
|
||||
|
||||
StackTrace stackTrace = jfr.stackTraces.get(event.stackTraceId);
|
||||
if (stackTrace != null) {
|
||||
long[] methods = stackTrace.methods;
|
||||
byte[] types = stackTrace.types;
|
||||
int[] lines = stackTrace.locations;
|
||||
for (int i = 0; i < methods.length; i++) {
|
||||
String methodName = getMethodName(methods[i], types[i]);
|
||||
int function = functions.index(methodName);
|
||||
s.writeInt(locations.index((long) function << 16 | lines[i] >>> 16));
|
||||
}
|
||||
}
|
||||
|
||||
s.commitField(packedLocations);
|
||||
s.field(2, value);
|
||||
|
||||
if (args.threads && event.tid != 0) {
|
||||
s.field(3, label("thread", getThreadName(event.tid)));
|
||||
}
|
||||
if (args.classify && stackTrace != null) {
|
||||
s.field(3, label("category", getCategory(stackTrace).title));
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
private Proto valueType(String type, String unit) {
|
||||
return new Proto(16)
|
||||
.field(1, strings.index(type))
|
||||
.field(2, strings.index(unit));
|
||||
}
|
||||
|
||||
private Proto label(String key, String str) {
|
||||
return new Proto(16)
|
||||
.field(1, strings.index(key))
|
||||
.field(2, strings.index(str));
|
||||
}
|
||||
|
||||
private Proto mapping(int id, long start, long limit, String fileName) {
|
||||
return new Proto(16)
|
||||
.field(1, id)
|
||||
.field(2, start)
|
||||
.field(3, limit)
|
||||
.field(5, strings.index(fileName));
|
||||
}
|
||||
|
||||
private Proto location(int id, long location) {
|
||||
return new Proto(16)
|
||||
.field(1, id)
|
||||
.field(4, line((int) (location >>> 16), (int) location & 0xffff));
|
||||
}
|
||||
|
||||
private Proto line(int functionId, int line) {
|
||||
return new Proto(16)
|
||||
.field(1, functionId)
|
||||
.field(2, line);
|
||||
}
|
||||
|
||||
private Proto function(int id, String name) {
|
||||
return new Proto(16)
|
||||
.field(1, id)
|
||||
.field(2, strings.index(name));
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
JfrToPprof converter;
|
||||
try (JfrReader jfr = new JfrReader(input)) {
|
||||
converter = new JfrToPprof(jfr, args);
|
||||
converter.convert();
|
||||
}
|
||||
try (FileOutputStream fos = new FileOutputStream(output);
|
||||
OutputStream out = args.output.endsWith("gz") ? new GZIPOutputStream(fos, 4096) : fos) {
|
||||
converter.dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.PrintStream;
|
||||
|
||||
/**
 * Loads bundled text resources and supports template-style streaming output
 * (print everything up to a marker, return the remainder).
 */
public class ResourceProcessor {

    /**
     * Reads a classpath resource fully and returns it as a UTF-8 string.
     *
     * @param name resource path, e.g. "/heatmap.html"
     * @return the resource contents decoded as UTF-8
     * @throws IllegalStateException if the resource is missing or unreadable
     */
    public static String getResource(String name) {
        try (InputStream stream = ResourceProcessor.class.getResourceAsStream(name)) {
            if (stream == null) {
                throw new IOException("No resource found");
            }

            ByteArrayOutputStream result = new ByteArrayOutputStream();
            byte[] buffer = new byte[32768];
            for (int length; (length = stream.read(buffer)) != -1; ) {
                result.write(buffer, 0, length);
            }
            return result.toString("UTF-8");
        } catch (IOException e) {
            // Fix: preserve the original exception as the cause instead of discarding it.
            throw new IllegalStateException("Can't load resource with name " + name, e);
        }
    }

    /**
     * Prints the part of {@code data} preceding the first occurrence of
     * {@code till} to {@code out} and returns the remainder after the marker.
     *
     * @throws IllegalStateException if the marker is absent (previously this
     *         surfaced as an obscure StringIndexOutOfBoundsException)
     */
    public static String printTill(PrintStream out, String data, String till) {
        int index = data.indexOf(till);
        if (index < 0) {
            // Fix: fail with a descriptive message rather than substring(0, -1).
            throw new IllegalStateException("Marker not found: " + till);
        }
        out.print(data.substring(0, index));
        return data.substring(index + till.length());
    }

}
|
||||
@@ -1,458 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
|
||||
import one.convert.Arguments;
|
||||
import one.convert.Index;
|
||||
import one.convert.JfrConverter;
|
||||
import one.convert.ResourceProcessor;
|
||||
import one.jfr.DictionaryInt;
|
||||
|
||||
/**
 * Builds and serializes the heatmap data model: collects samples and stack
 * traces per chunk, then encodes them into the heatmap HTML template as an
 * LZ78-compressed method tree plus Huffman-compressed block sizes.
 */
public class Heatmap {

    // TODO: should be probably an argument,
    // but there is a good chance that changing it will have some side effects
    public static final int BLOCK_DURATION_MS = 20;

    private final Arguments args;
    // Mutable collection state; consumed (set to null) by evaluate() before dumping.
    private State state;
    // Absolute start time (ms) of the recording, set by finish().
    private long startMs;

    public Heatmap(Arguments args, JfrConverter converter) {
        this.args = args;
        this.state = new State(converter, BLOCK_DURATION_MS);
    }

    /** Records one event occurrence at the given absolute millisecond timestamp. */
    public void addEvent(int stackTraceId, int extra, byte type, long timeMs) {
        state.addEvent(stackTraceId, extra, type, timeMs);
    }

    /** Registers a chunk-local stack trace under its chunk-local id. */
    public void addStack(long id, long[] methods, int[] locations, byte[] types, int size) {
        state.addStack(id, methods, locations, types, size);
    }

    /** Resets per-chunk caches before a new chunk's stacks/events arrive. */
    public void beforeChunk() {
        state.methodsCache.clear();
    }

    /** Finalizes collection: remembers the recording start and drops per-chunk caches. */
    public void finish(long startMs) {
        this.startMs = startMs;
        state.methodsCache.clear();
        state.stackTracesCache.clear();
    }

    // Freezes the collected state into an immutable EvaluationContext.
    // Destructive: this.state becomes null afterwards.
    private EvaluationContext evaluate() {
        State state = this.state;
        this.state = null;
        return new EvaluationContext(
                state.sampleList.samples(),
                state.methodsCache.methodsIndex(),
                state.stackTracesRemap.orderedTraces(),
                state.stackTracesRemap.orderedTraces() == null ? null : state.methodsCache.orderedSymbolTable()
        );
    }

    // Serializes the method table: count, then per method
    // [className, methodName, bci (18 bits), line (18 bits), type (6 bits)].
    private void compressMethods(HtmlOut out, Method[] methods) {
        out.writeVar(methods.length);
        for (Method method : methods) {
            out.writeVar(method.className);
            out.writeVar(method.methodName);
            out.write18(method.location & 0xffff);
            out.write18(method.location >>> 16);
            out.write6(method.type);
        }
    }

    /**
     * Renders the heatmap HTML: streams the template, replacing the
     * placeholder comments with the encoded heatmap data, method table,
     * title, start time, and the string constant pool.
     */
    public void dump(PrintStream stream) throws IOException {
        if (state.sampleList.getRecordsCount() == 0) {
            // Need a better way to handle this, but we should not throw an exception
            stream.println("No samples found");
            return;
        }

        EvaluationContext evaluationContext = evaluate();

        String tail = ResourceProcessor.getResource("/heatmap.html");

        tail = ResourceProcessor.printTill(stream, tail, "/*executionsHeatmap:*/");
        HtmlOut out = new HtmlOut(stream);
        // 'S'/'E' sentinels bracket each binary section for the JS decoder.
        stream.print('S');
        printHeatmap(out, evaluationContext);
        stream.print('E');

        tail = ResourceProcessor.printTill(stream, tail, "/*methods:*/");
        out.reset();
        stream.print('S');
        printMethods(out, evaluationContext);
        stream.print('E');

        tail = ResourceProcessor.printTill(stream, tail, "/*title:*/");
        stream.print(args.title == null ? "Heatmap" : args.title);

        tail = ResourceProcessor.printTill(stream, tail, "/*startMs:*/0");
        stream.print(startMs);

        tail = ResourceProcessor.printTill(stream, tail, "/*cpool:*/");
        printConstantPool(stream, evaluationContext);

        stream.print(tail);
    }

    // Encodes the whole heatmap payload. Order matters: each step feeds the next
    // (frequencies -> renaming -> tables -> tree -> samples), and the five
    // trailing write30 values are the decoder's section sizes.
    private void printHeatmap(final HtmlOut out, EvaluationContext context) {
        int veryStart = out.pos();
        int wasPos = out.pos();

        // calculates methods frequency during building the tree
        int[] stackChunksBuffer = buildLz78TreeAndPrepareData(context);

        // gives methods new ids, more frequent (in tree's data) methods will have lower id
        renameMethodsByFrequency(context);

        // writes "starts" - ids of methods that indicates a start of a next stack trace
        writeStartMethods(out, context);
        wasPos = debugStep("start methods", out, wasPos, veryStart);

        // writes block sizes, compressed by huffman algorithm
        writeBlockSizes(out, context);
        wasPos = debugStep("stack sizes", out, wasPos, veryStart);

        // NOTE: destroys internal state!
        SynonymTable synonymTable = context.nodeTree.extractSynonymTable();
        synonymTable.calculateSynonyms();
        // writes frequent lz tree nodes as a synonyms table
        writeSynonymsTable(out, synonymTable);
        wasPos = debugStep("tree synonyms", out, wasPos, veryStart);

        // writes lz tree with two pairs of var-ints: [parent node id] + [method id of this node]
        writeTree(out, synonymTable, context);
        wasPos = debugStep("tree body", out, wasPos, veryStart);

        // calculate counts for the next synonyms table, that will be used for samples
        int chunksCount = calculateSamplesSynonyms(synonymTable, context, stackChunksBuffer);
        // writes frequent lz tree nodes as a synonyms table (for sample chunks)
        writeSynonymsTable(out, synonymTable);
        wasPos = debugStep("samples synonyms", out, wasPos, veryStart);

        // writes sample chunks as var-ints references for [node id]
        writeSamples(out, synonymTable, context, stackChunksBuffer);
        debugStep("samples body", out, wasPos, veryStart);
        debug("storage size: " + context.nodeTree.storageSize());

        out.write30(context.nodeTree.nodesCount());
        out.write30(context.sampleList.blockSizes.length);
        out.write30(context.nodeTree.storageSize());
        out.write30(chunksCount);
        out.write30(context.sampleList.stackIds.length);
    }

    // Emits every sample's node-id chunks (synonym-compressed var-ints).
    // stackChunksBuffer[stackId*2 .. stackId*2+1] delimits that stack's chunk range.
    private void writeSamples(HtmlOut out, SynonymTable synonymTable, EvaluationContext context,
                              int[] stackChunksBuffer) {
        for (int stackId : context.sampleList.stackIds) {
            int chunksStart = stackChunksBuffer[stackId * 2];
            int chunksEnd = stackChunksBuffer[stackId * 2 + 1];

            for (int i = chunksStart; i < chunksEnd; i++) {
                int nodeId = stackChunksBuffer[i];
                out.writeVar(synonymTable.nodeIdOrSynonym(nodeId));
            }
        }
    }

    // Counts node-id usages across all samples to build the samples synonym
    // table; returns the total number of chunks that writeSamples will emit.
    private int calculateSamplesSynonyms(SynonymTable synonymTable, EvaluationContext context,
                                         int[] stackChunksBuffer) {
        int chunksCount = 0;
        int[] childrenCount = synonymTable.reset();

        for (int stackId : context.sampleList.stackIds) {
            int chunksStart = stackChunksBuffer[stackId * 2];
            int chunksEnd = stackChunksBuffer[stackId * 2 + 1];

            for (int i = chunksStart; i < chunksEnd; i++) {
                childrenCount[stackChunksBuffer[i]]--; // negation for reverse sort
                chunksCount++;
            }
        }

        synonymTable.calculateSynonyms();
        return chunksCount;
    }

    // Emits the LZ78 tree body: for each node, [parent id (synonymized), method id (renamed)].
    private void writeTree(HtmlOut out, SynonymTable synonymTable, EvaluationContext context) {
        long[] data = context.nodeTree.treeData();
        int dataSize = context.nodeTree.treeDataSize();
        for (int i = 0; i < dataSize; i++) {
            long d = data[i];
            int parentId = context.nodeTree.extractParentId(d);
            int methodId = context.nodeTree.extractMethodId(d);

            out.writeVar(synonymTable.nodeIdOrSynonym(parentId));
            out.writeVar(context.orderedMethods[methodId].frequencyOrNewMethodId);
        }
    }

    // Emits the synonym table: count followed by each synonym value.
    private void writeSynonymsTable(HtmlOut out, SynonymTable synonymTable) {
        out.writeVar(synonymTable.synonymsCount());
        for (int i = 0; i < synonymTable.synonymsCount(); i++) {
            out.writeVar(synonymTable.synonymAt(i));
        }
    }

    // Emits the set of method ids that can begin a stack trace
    // (count first, then the renamed id of each start method).
    private void writeStartMethods(HtmlOut out, EvaluationContext context) {
        int startsCount = 0;
        for (Method method : context.orderedMethods) {
            if (method.start) {
                startsCount++;
            }
        }
        out.writeVar(startsCount);
        for (Method method : context.orderedMethods) {
            if (method.start) {
                out.writeVar(method.frequencyOrNewMethodId);
            }
        }
    }

    // Reassigns method ids so that more frequent methods get smaller ids
    // (smaller var-int encodings). frequencyOrNewMethodId switches meaning here:
    // before this call it holds the frequency, afterwards the new id.
    private void renameMethodsByFrequency(EvaluationContext context) {
        Arrays.sort(context.orderedMethods, new Comparator<Method>() {
            @Override
            public int compare(Method o1, Method o2) {
                return Integer.compare(o2.frequencyOrNewMethodId, o1.frequencyOrNewMethodId);
            }
        });

        for (int i = 0; i < context.orderedMethods.length; i++) {
            Method method = context.orderedMethods[i];
            method.frequencyOrNewMethodId = i + 1; // zero is reserved for no method
        }

        // restores order
        context.methods.keys(context.orderedMethods);
    }

    // Builds the LZ78 tree over all samples and records, per stack id, the range
    // of emitted chunk node-ids. Returns the shared buffer laid out as
    // [stackId*2 -> start, stackId*2+1 -> end] headers followed by chunk data.
    private int[] buildLz78TreeAndPrepareData(EvaluationContext context) {
        int[] samples = context.sampleList.stackIds;

        // prepared data for output, firstly used to remember last stack positions
        int[] stackBuffer = new int[(context.stackTraces.length + 1) * 16];

        // remember the last position of stackId
        for (int i = 0; i < samples.length; i++) {
            int stackId = samples[i];
            stackBuffer[stackId * 2] = ~i; // rewrites data multiple times, the last one wins
        }

        int chunksIterator = context.stackTraces.length * 2 + 1;

        // builds the tree and prepares data for the last stack
        for (int i = 0; i < samples.length; i++) {
            int stackId = samples[i];
            int current = 0;
            int[] stack = context.stackTraces[stackId];

            if (i == ~stackBuffer[stackId * 2]) { // last version of that stack
                stackBuffer[stackId * 2] = chunksIterator; // start

                for (int methodId : stack) {
                    current = context.nodeTree.appendChild(current, methodId);
                    if (current == 0) { // so we are starting from root again, it will be written to output as Lz78 element - [parent node id; method id]
                        context.orderedMethods[methodId].frequencyOrNewMethodId++;
                        if (stackBuffer.length == chunksIterator) {
                            stackBuffer = Arrays.copyOf(stackBuffer, chunksIterator + chunksIterator / 2);
                        }

                        int justAppendedId = context.nodeTree.nodesCount() - 1;
                        stackBuffer[chunksIterator++] = justAppendedId;
                        context.nodeTree.markNodeAsLastlyUsed(justAppendedId);
                    }
                }

                if (current != 0) {
                    if (stackBuffer.length == chunksIterator) {
                        stackBuffer = Arrays.copyOf(stackBuffer, chunksIterator + chunksIterator / 2);
                    }

                    stackBuffer[chunksIterator++] = current;
                    context.nodeTree.markNodeAsLastlyUsed(current);
                }

                stackBuffer[stackId * 2 + 1] = chunksIterator; // end
            } else { // general case
                for (int methodId : stack) {
                    current = context.nodeTree.appendChild(current, methodId);
                    if (current == 0) { // so we are starting from root again, it will be written to output as Lz78 element - [parent node id; method id]
                        context.orderedMethods[methodId].frequencyOrNewMethodId++;
                    }
                }
            }
        }

        // removes unused chunks
        context.nodeTree.compactTree(stackBuffer, context.stackTraces.length * 2 + 1, chunksIterator);

        return stackBuffer;
    }

    // Huffman-encodes the per-block sample counts: first the decode table
    // (value | bit-length << 56 entries), then the packed bit stream.
    private void writeBlockSizes(HtmlOut out, EvaluationContext context) {
        int[] blockSizeFrequencies = new int[1024];
        int maxBlockSize = 0;
        for (int blockSize : context.sampleList.blockSizes) {
            if (blockSize >= blockSizeFrequencies.length) {
                blockSizeFrequencies = Arrays.copyOf(blockSizeFrequencies, blockSize * 2);
            }
            blockSizeFrequencies[blockSize]++;
            maxBlockSize = Math.max(maxBlockSize, blockSize);
        }

        HuffmanEncoder encoder = new HuffmanEncoder(blockSizeFrequencies, maxBlockSize);

        long[] decodeTable = encoder.calculateOutputTable();

        out.writeVar(decodeTable.length);
        // Table is sorted by bit length, so the last entry holds the maximum.
        int maxBits = (int) (decodeTable[decodeTable.length - 1] >>> 56);
        out.writeVar(maxBits);

        for (long l : decodeTable) {
            out.writeVar(l & 0x00FF_FFFF_FFFF_FFFFL);
            out.writeVar(l >>> 56);
        }

        for (int blockSize : context.sampleList.blockSizes) {
            if (encoder.append(blockSize)) {
                for (int value : encoder.values) {
                    out.nextByte(value);
                }
            }
        }
        if (encoder.flushIfNeed()) {
            for (int value : encoder.values) {
                out.nextByte(value);
            }
        }
    }

    // Emits the symbol table as a JS array of escaped string literals.
    private void printConstantPool(PrintStream out, EvaluationContext evaluationContext) {
        for (String symbol : evaluationContext.symbols) {
            out.print('"');
            out.print(symbol.replace("\\", "\\\\").replace("\"", "\\\""));
            out.print("\",");
        }
    }

    // Emits the method table, ordered by the renamed ids assigned in
    // renameMethodsByFrequency, bracketed by 'A' markers.
    private void printMethods(HtmlOut out, EvaluationContext evaluationContext) throws IOException {
        debug("methods count " + evaluationContext.orderedMethods.length);
        Arrays.sort(evaluationContext.orderedMethods, new Comparator<Method>() {
            @Override
            public int compare(Method o1, Method o2) {
                return Integer.compare(o1.frequencyOrNewMethodId, o2.frequencyOrNewMethodId);
            }
        });
        out.nextByte('A');
        compressMethods(out, evaluationContext.orderedMethods);
        out.nextByte('A');
    }

    // Logs the size of the section written since wasPos; returns the new position.
    private int debugStep(String step, HtmlOut out, int wasPos, int veryStartPos) {
        int nowPos = out.pos();
        debug(step + " " + (nowPos - wasPos) / (1024.0 * 1024.0) + " MB");
        debug(step + " pos in data " + (nowPos - veryStartPos));
        return nowPos;
    }

    private void debug(String text) {
        // Basically, no user will ever need that, but it will be helpful to debug broken data
        // System.out.println(text);
    }

    // Immutable snapshot of the collected data used during serialization.
    private static class EvaluationContext {
        final Index<Method> methods;
        final Method[] orderedMethods;
        final int[][] stackTraces;
        final String[] symbols;

        final SampleList.Result sampleList;

        final LzNodeTree nodeTree = new LzNodeTree();

        EvaluationContext(SampleList.Result sampleList, Index<Method> methods, int[][] stackTraces, String[] symbols) {
            this.sampleList = sampleList;
            this.methods = methods;
            this.stackTraces = stackTraces;
            this.symbols = symbols;

            orderedMethods = methods.keys();
        }
    }

    // Mutable collection-phase state: caches mapping chunk-local ids to
    // global remapped stacks/methods, plus the time-ordered sample list.
    private static class State {

        private static final int LIMIT = Integer.MAX_VALUE;

        final SampleList sampleList;
        final StackStorage stackTracesRemap = new StackStorage();

        final DictionaryInt stackTracesCache = new DictionaryInt();
        final MethodCache methodsCache;

        // reusable array to (temporary) store (potentially) new stack trace
        int[] cachedStackTrace = new int[4096];

        State(JfrConverter converter, long blockDurationMs) {
            sampleList = new SampleList(blockDurationMs);
            methodsCache = new MethodCache(converter);
        }

        // Records one sample. `extra` (class id) != 0 forks the stack trace:
        // the class is appended as an extra frame, cached under
        // the composite key [extra << 32 | stackTraceId].
        public void addEvent(int stackTraceId, int extra, byte type, long timeMs) {
            if (sampleList.getRecordsCount() >= LIMIT) {
                return;
            }
            if (extra == 0) {
                sampleList.add(stackTracesCache.get(stackTraceId), timeMs);
                return;
            }

            int id = stackTracesCache.get((long) extra << 32 | stackTraceId, -1);
            if (id != -1) {
                sampleList.add(id, timeMs);
                return;
            }

            int prototypeId = stackTracesCache.get(stackTraceId);
            int[] prototype = stackTracesRemap.get(prototypeId);

            id = stackTracesRemap.indexWithPrototype(prototype, methodsCache.indexForClass(extra, type));
            stackTracesCache.put((long) extra << 32 | stackTraceId, id);

            sampleList.add(id, timeMs);
        }

        // Remaps a chunk-local stack (top-down) into global method ids
        // (bottom-up) and caches the resulting remapped-stack id.
        public void addStack(long id, long[] methods, int[] locations, byte[] types, int size) {
            int[] stackTrace = cachedStackTrace;
            if (stackTrace.length < size) {
                cachedStackTrace = stackTrace = new int[size * 2];
            }

            for (int i = size - 1; i >= 0; i--) {
                long methodId = methods[i];
                byte type = types[i];
                int location = locations[i];

                int index = size - 1 - i;
                boolean firstMethodInTrace = index == 0;

                stackTrace[index] = methodsCache.index(methodId, location, type, firstMethodInTrace);
            }

            stackTracesCache.put(id, stackTracesRemap.index(stackTrace, size));
        }

    }

}
|
||||
@@ -1,85 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.io.PrintStream;
|
||||
|
||||
/**
 * Byte-oriented writer that embeds binary payloads safely inside an HTML page.
 * Bytes that would break the surrounding markup are remapped onto spare code
 * points, and several fixed-width / variable-width encodings are provided on top.
 */
public class HtmlOut {

    private final PrintStream out;

    // Bytes emitted since construction or the last reset().
    private int pos;

    public HtmlOut(PrintStream out) {
        this.out = out;
    }

    /** Returns the number of bytes emitted so far in the current section. */
    public int pos() {
        return pos;
    }

    /** Restarts the byte counter for a new data section. */
    public void reset() {
        pos = 0;
    }

    /**
     * Emits one byte, remapping HTML-hostile characters
     * (NUL, CR, '&amp;', '&lt;', '&gt;') onto code points 127..123.
     */
    public void nextByte(int c) {
        int encoded = c;
        if (encoded == 0) {
            encoded = 127;
        } else if (encoded == '\r') {
            encoded = 126;
        } else if (encoded == '&') {
            encoded = 125;
        } else if (encoded == '<') {
            encoded = 124;
        } else if (encoded == '>') {
            encoded = 123;
        }
        out.write(encoded);
        pos++;
    }

    /**
     * Emits a non-negative value as a base-61 varint, least significant digit
     * first: digits 61..121 mean "more digits follow", a final 0..60 terminates.
     */
    public void writeVar(long v) {
        long rest = v;
        while (rest >= 61) {
            nextByte(61 + (int) (rest % 61));
            rest /= 61;
        }
        nextByte((int) rest);
    }

    /** Emits a single 6-bit group; rejects values outside 0..0x3F. */
    public void write6(int v) {
        requireFits(v, 0x3F);
        nextByte(v);
    }

    /** Emits an 18-bit value as three 6-bit groups, least significant first. */
    public void write18(int v) {
        requireFits(v, 0x3FFFF);
        emitSextets(v, 3);
    }

    /** Emits a 30-bit value as five 6-bit groups, least significant first. */
    public void write30(int v) {
        requireFits(v, 0x3FFFFFFF);
        emitSextets(v, 5);
    }

    // Rejects values with bits set outside the given mask.
    private void requireFits(int v, int mask) {
        if ((v & ~mask) != 0) {
            throw new IllegalArgumentException("Value " + v + " is out of bounds");
        }
    }

    // Writes the low 6*count bits of v as count groups, LSB group first.
    private void emitSextets(int v, int count) {
        int remaining = v;
        for (int i = 0; i < count; i++) {
            nextByte(remaining & 0x3F);
            remaining >>>= 6;
        }
    }
}
|
||||
@@ -1,152 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.PriorityQueue;
|
||||
|
||||
public class HuffmanEncoder {
|
||||
|
||||
private final long[] decodeTable; // 8 bit for bits count, 56 value
|
||||
private final long[] encodeTable; // 8 bit for bits count, 56 bits
|
||||
|
||||
private int data;
|
||||
private int bits;
|
||||
|
||||
// log2(123^9) = 62.4826305481 > 62 bits, 0.7% space lost, but it is expensive to decode (no support for int64 in js)
|
||||
// log2(123^4) = 27.7700580214 > 27 bits, 2.8% space lost, but it is cheap to decode (using one int32)
|
||||
private static final int MAX_BITS = 27;
|
||||
public final int[] values = new int[4]; // 0..122
|
||||
|
||||
/**
 * Builds the Huffman code from value frequencies.
 * Produces two tables: decodeTable (sorted entries of [bit length << 56 | value],
 * written to the output for the JS decoder) and encodeTable (indexed by value,
 * entries of [bit length << 56 | canonical code]).
 *
 * @param frequencies       frequency per value; zero entries are skipped
 * @param maxFrequencyIndex largest value that may appear (inclusive)
 */
public HuffmanEncoder(int[] frequencies, int maxFrequencyIndex) {
    // Standard Huffman construction: repeatedly merge the two least frequent nodes.
    PriorityQueue<Node> minHeap = new PriorityQueue<>(maxFrequencyIndex + 1);
    for (int i = 0; i <= maxFrequencyIndex; i++) {
        int frequency = frequencies[i];
        if (frequency == 0) {
            continue;
        }
        minHeap.add(new Node(frequency, i));
    }

    while (minHeap.size() > 1) {
        Node left = minHeap.remove();
        Node right = minHeap.remove();

        minHeap.add(new Node(left, right));
    }

    // Collect [bit length << 56 | value] per used value, then sort so entries
    // are ordered by code length (canonical Huffman ordering).
    long[] decodeTable = new long[maxFrequencyIndex + 1];
    minHeap.remove().fillTable(decodeTable, 0);
    Arrays.sort(decodeTable);
    // Trim the leading zero entries left by values that never occurred.
    for (int i = 0; i < decodeTable.length; i++) {
        if (decodeTable[i] != 0) {
            if (i != 0) {
                long[] nextDecodeTable = new long[decodeTable.length - i];
                System.arraycopy(decodeTable, i, nextDecodeTable, 0, nextDecodeTable.length);
                decodeTable = nextDecodeTable;
            }
            break;
        }
    }
    this.decodeTable = decodeTable;

    // Assign canonical codes: the first (shortest) entry gets code 0; each next
    // code is previous + 1, left-shifted when the bit length grows.
    encodeTable = new long[maxFrequencyIndex + 1];
    encodeTable[(int) decodeTable[0]] = decodeTable[0] & 0xFF00_0000_0000_0000L;
    long code = 0;

    for (int i = 1; i < decodeTable.length; i++) {
        long decodePrev = decodeTable[i - 1];
        long decodeNow = decodeTable[i];

        long prevCount = decodePrev >>> 56;
        long nowCount = decodeNow >>> 56;

        code = (code + 1) << (nowCount - prevCount);

        int value = (int) decodeNow;
        encodeTable[value] = nowCount << 56 | code;
    }
}
|
||||
|
||||
public boolean append(int value) {
|
||||
boolean hasOverflow = false;
|
||||
|
||||
long v = encodeTable[value];
|
||||
int bits = (int) (v >>> 56);
|
||||
for (long i = 1L << (bits - 1); i > 0; i >>>= 1) {
|
||||
this.data = this.data << 1 | ((v & i) == 0 ? 0 : 1);
|
||||
if (++this.bits == MAX_BITS) {
|
||||
hasOverflow = true;
|
||||
flush();
|
||||
}
|
||||
}
|
||||
|
||||
return hasOverflow;
|
||||
}
|
||||
|
||||
public boolean flushIfNeed() {
|
||||
if (bits == 0) {
|
||||
return false;
|
||||
}
|
||||
this.data = this.data << (MAX_BITS - bits);
|
||||
flush();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Emits the MAX_BITS-bit buffer as four base-123 digits into `values` and resets state.
public void flush() {
    // Reverse the bit order so the JS decoder can consume bits from the low end;
    // >>> 5 drops the 32 - MAX_BITS = 5 unused high bits of the reversed word.
    data = Integer.reverse(data) >>> 5;

    // Split the 27-bit word into four base-123 digits (123^4 > 2^27, see MAX_BITS
    // comment above); each digit maps to a compact character range on the JS side.
    values[3] = data % 123;
    data /= 123;
    values[2] = data % 123;
    data /= 123;
    values[1] = data % 123;
    data /= 123;
    values[0] = data;
    data = 0;

    bits = 0;
}
|
||||
|
||||
// Returns the decode table; despite the name, no computation happens here —
// the table is fully built by the constructor.
public long[] calculateOutputTable() {
    return decodeTable;
}
|
||||
|
||||
// Huffman tree node: a leaf carries a symbol value, an inner node carries two subtrees.
private static class Node implements Comparable<Node> {
    final int frequency; // leaf: symbol frequency; inner node: sum of children
    final int value;     // symbol for leaves, -1 for inner nodes

    Node left, right;

    // Leaf node for one symbol.
    Node(int frequency, int value) {
        this.frequency = frequency;
        this.value = value;
    }

    // Inner node merging two subtrees; weight is the combined frequency.
    public Node(Node left, Node right) {
        this.left = left;
        this.right = right;
        this.frequency = left.frequency + right.frequency;
        this.value = -1;
    }

    // Writes (codeLength << 56 | symbol) for every leaf under this node.
    // bitsCount is pre-shifted: each tree level adds 1 in the top byte.
    public void fillTable(long[] table, long bitsCount) {
        if (value >= 0) {
            table[value] = bitsCount | value;
            return;
        }
        left.fillTable(table, bitsCount + 0x0100_0000_0000_0000L);
        right.fillTable(table, bitsCount + 0x0100_0000_0000_0000L);
    }

    @Override
    public int compareTo(Node o) {
        // frequencies are strictly positive, so the difference cannot overflow
        return frequency - o.frequency;
    }
}
|
||||
|
||||
}
|
||||
@@ -1,173 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
// LZ78-style dictionary tree over (parentNodeId, methodId) pairs, backed by an
// open-addressed hash table. Node ids are 1-based; node 0 is the implicit root.
public class LzNodeTree {

    private static final int INITIAL_CAPACITY = 2 * 1024 * 1024;

    // hash(methodId << 32 | parentNodeId) -> methodId << 32 | parentNodeId
    private long[] keys; // reused by SynonymTable
    // hash(methodId << 32 | parentNodeId) -> childNodeId
    private int[] values; // can be reused after buildLz78TreeAndPrepareData

    // (nodeId - 1) -> methodId << 32 | parentNodeId
    private long[] outputData; // can be reused after writeTree:130!
    // nodeId -> parentNodeId << 32 | lengthToRoot; the two top bits are usage flags
    // set by markNodeAsLastlyUsed (NOTE(review): bit 63 appears to mean "used as a
    // chain tail", bit 62 "also used as an ancestor" — confirm against callers).
    private int[] childrenCount; // reused by SynonymTable
    private long[] lengthToRoot;

    private int storageSize = 0;
    private int nodesCount = 1; // node 0 is the root, so the tree starts with one node

    public LzNodeTree() {
        keys = new long[INITIAL_CAPACITY];
        values = new int[INITIAL_CAPACITY];

        outputData = new long[INITIAL_CAPACITY / 2];
        childrenCount = new int[INITIAL_CAPACITY / 2];
        lengthToRoot = new long[INITIAL_CAPACITY / 2];
    }

    // Returns the existing child's node id, or 0 after creating a new child node.
    public int appendChild(int parentNode, int methodId) {
        long method = (long) methodId << 32;
        long key = method | parentNode;

        // Linear-probe lookup; key 0 marks an empty slot (a real key always has a
        // non-zero methodId in the high half).
        int mask = keys.length - 1;
        int i = hashCode(key) & mask;
        while (true) {
            long k = keys[i];
            if (k == 0) {
                break;
            }
            if (k == key) {
                return values[i];
            }
            i = (i + 1) & mask;
        }

        // Grow the per-node arrays by 1.5x when full.
        if (nodesCount >= outputData.length) {
            outputData = Arrays.copyOf(outputData, nodesCount + nodesCount / 2);
            childrenCount = Arrays.copyOf(childrenCount, nodesCount + nodesCount / 2);
            lengthToRoot = Arrays.copyOf(lengthToRoot, nodesCount + nodesCount / 2);
        }

        // Depth = parent's depth + 1, packed with the parent id in the high half.
        lengthToRoot[nodesCount] = ((int) lengthToRoot[parentNode] + 1) | ((long) parentNode << 32);
        outputData[nodesCount - 1] = key;
        keys[i] = key;
        values[i] = nodesCount;

        // Keep the hash table at most half full.
        if (nodesCount * 2 > keys.length) {
            resize(keys.length * 2);
        }
        nodesCount++;

        childrenCount[parentNode]--; // negated counts, so ascending sort puts busiest nodes first

        return 0;
    }

    public long[] treeData() {
        return outputData;
    }

    public int treeDataSize() {
        return nodesCount - 1;
    }

    public int extractParentId(long treeElement) {
        return (int) treeElement;
    }

    public int extractMethodId(long treeElement) {
        return (int) (treeElement >>> 32);
    }

    // Flags this node (bit 63) and all its ancestors (bits 63+62) as used.
    // The walk stops early once it reaches an already-flagged ancestor, whose own
    // chain was flagged on a previous call (the flag bits make `parent` negative).
    public void markNodeAsLastlyUsed(int nodeId) {
        long ltr = lengthToRoot[nodeId];
        int parent = (int) (ltr >>> 32);
        if (parent >= 0) {
            lengthToRoot[nodeId] = ltr | 0x8000000000000000L;
            do {
                ltr = lengthToRoot[parent];
                lengthToRoot[parent] = ltr | 0xC000000000000000L;
                parent = (int) (ltr >>> 32);
            } while (parent > 0);
        }
    }

    // destroys values
    // Renumbers nodes so only used ones survive, remapping the given id slice too.
    public void compactTree(int[] remapAsWell, int fromIndex, int toIndex) {
        int[] mappings = values;
        mappings[0] = 0; // root keeps id 0
        int nodes = 1;
        int storageSize = 0;
        for (int oldNodeID = 1; oldNodeID < nodesCount; oldNodeID++) {
            long ltr = lengthToRoot[oldNodeID];
            if (ltr >= 0) {
                // unused
                continue;
            }
            if ((ltr & 0x4000000000000000L) == 0) {
                // Chain tails (never used as an ancestor) contribute their depth.
                storageSize += (int) ltr;
            }
            mappings[oldNodeID] = nodes;
            childrenCount[nodes] = childrenCount[oldNodeID];
            long out = outputData[oldNodeID - 1];
            long outMethod = 0xFFFFFFFF00000000L & out;
            int oldParent = (int) out;
            // Parents precede children in id order, so mappings[oldParent] is final.
            outputData[nodes - 1] = outMethod | mappings[oldParent];
            nodes++;
        }
        for (int i = fromIndex; i < toIndex; i++) {
            remapAsWell[i] = mappings[remapAsWell[i]];
        }
        this.storageSize = storageSize;
        this.nodesCount = nodes;
    }

    // destroys keys and childrenCount arrays
    public SynonymTable extractSynonymTable() {
        return new SynonymTable(keys, childrenCount, nodesCount);
    }

    public int storageSize() {
        return storageSize;
    }

    public int nodesCount() {
        return nodesCount;
    }

    // Rehashes all entries into a table of the given (power-of-two) capacity.
    private void resize(int newCapacity) {
        long[] newKeys = new long[newCapacity];
        int[] newValues = new int[newCapacity];
        int mask = newKeys.length - 1;

        for (int i = 0; i < keys.length; i++) {
            if (keys[i] != 0) {
                for (int j = hashCode(keys[i]) & mask; ; j = (j + 1) & mask) {
                    if (newKeys[j] == 0) {
                        newKeys[j] = keys[i];
                        newValues[j] = values[i];
                        break;
                    }
                }
            }
        }

        keys = newKeys;
        values = newValues;
    }

    // 64 -> 32 bit mix using the MurmurHash2 finalizer constant.
    private static int hashCode(long key) {
        key *= 0xc6a4a7935bd1e995L;
        return (int) (key ^ (key >>> 32));
    }
}
|
||||
@@ -1,61 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import one.convert.Frame;
|
||||
|
||||
public class Method {
|
||||
|
||||
public final int className;
|
||||
public final int methodName;
|
||||
public final int location;
|
||||
public final byte type;
|
||||
public final boolean start;
|
||||
|
||||
final long originalMethodId;
|
||||
|
||||
Method next;
|
||||
|
||||
public int frequencyOrNewMethodId;
|
||||
public int index;
|
||||
|
||||
Method(int className, int methodName) {
|
||||
this(0, className, methodName, 0, Frame.TYPE_NATIVE, true);
|
||||
}
|
||||
|
||||
Method(long originalMethodId, int className, int methodName, int location, byte type, boolean start) {
|
||||
this.originalMethodId = originalMethodId;
|
||||
this.className = className;
|
||||
this.methodName = methodName;
|
||||
this.location = location;
|
||||
this.type = type;
|
||||
this.start = start;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Method method = (Method) o;
|
||||
|
||||
if (className != method.className) return false;
|
||||
if (methodName != method.methodName) return false;
|
||||
if (location != method.location) return false;
|
||||
if (type != method.type) return false;
|
||||
return start == method.start;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = className;
|
||||
result = 31 * result + methodName;
|
||||
result = 31 * result + location;
|
||||
result = 31 * result + (int) type;
|
||||
result = 31 * result + (start ? 1 : 0);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import one.convert.Index;
|
||||
import one.convert.JfrConverter;
|
||||
import one.jfr.Dictionary;
|
||||
|
||||
// Resolves JFR method ids to interned Method records and index values, with a
// dense array cache for small ids and a hash dictionary for the rest.
public class MethodCache {
    private final JfrConverter converter;
    private final Index<String> symbolTable = new Index<>(String.class, "", 32768);
    private final Index<Method> methodIndex = new Index<>(Method.class, new Method(symbolTable.index("all"), 0), 32768);

    // Direct-mapped cache for method ids below 65536 (the common case).
    private final Method[] nearCache = new Method[256 * 256];
    // It should be better to create dictionary with linked methods instead of open addressed hash table
    // but in most cases all methods should fit nearCache, so less code is better
    private final Dictionary<Method> farMethods = new Dictionary<>(1024);

    public MethodCache(JfrConverter converter) {
        this.converter = converter;
    }

    // Drops cached methods; the symbol and method indexes are intentionally kept.
    public void clear() {
        Arrays.fill(nearCache, null);
        farMethods.clear();
    }

    // Returns the method-index value for (methodId, location, type, firstInStack),
    // creating and caching a Method on first sight. Variants of the same methodId
    // (different location/type/start) are chained via Method.next.
    // NOTE(review): assumes methodId >= 0; a negative id would index nearCache
    // negatively — confirm ids are non-negative at the call sites.
    public int index(long methodId, int location, byte type, boolean firstInStack) {
        Method method;
        if (methodId < nearCache.length) {
            int mid = (int) methodId;
            method = nearCache[mid];
            if (method == null) {
                method = createMethod(methodId, location, type, firstInStack);
                nearCache[mid] = method;
                return method.index = methodIndex.index(method);
            }
        } else {
            // this should be extremely rare case
            method = farMethods.get(methodId);
            if (method == null) {
                method = createMethod(methodId, location, type, firstInStack);
                farMethods.put(methodId, method);
                return method.index = methodIndex.index(method);
            }
        }

        // Walk the chain: exact variant match wins; any entry with the same
        // methodId serves as a prototype to avoid re-resolving names.
        Method last = null;
        Method prototype = null;
        while (method != null) {
            if (method.originalMethodId == methodId) {
                if (method.location == location && method.type == type && method.start == firstInStack) {
                    return method.index;
                }
                prototype = method;
            }
            last = method;
            method = method.next;
        }

        if (prototype != null) {
            // Reuse the prototype's resolved class/method names for the new variant.
            last.next = method = new Method(methodId, prototype.className, prototype.methodName, location, type, firstInStack);
            return method.index = methodIndex.index(method);
        }

        last.next = method = createMethod(methodId, location, type, firstInStack);

        return method.index = methodIndex.index(method);
    }

    // Index for a class-only frame. Bit 63 set in the synthetic methodId keeps it
    // out of the range of real JFR method ids.
    public int indexForClass(int extra, byte type) {
        long methodId = (long) extra << 32 | 1L << 63;
        Method method = farMethods.get(methodId);
        Method last = null;
        while (method != null) {
            if (method.originalMethodId == methodId) {
                // Class frames are always stored with location -1 and start=false.
                if (method.location == -1 && method.type == type && !method.start) {
                    return method.index;
                }
            }
            last = method;
            method = method.next;
        }

        String javaClassName = converter.getClassName(extra);
        method = new Method(methodId, symbolTable.index(javaClassName), 0, -1, type, false);
        if (last == null) {
            farMethods.put(methodId, method);
        } else {
            last.next = method;
        }
        return method.index = methodIndex.index(method);
    }

    // Resolves a frame via the converter and interns its class and method names.
    private Method createMethod(long methodId, int location, byte type, boolean firstInStack) {
        StackTraceElement ste = converter.getStackTraceElement(methodId, type, location);
        int className = symbolTable.index(ste.getClassName());
        int methodName = symbolTable.index(ste.getMethodName());
        return new Method(methodId, className, methodName, location, type, firstInStack);
    }

    // Symbols in index order, for serialization.
    public String[] orderedSymbolTable() {
        return symbolTable.keys();
    }

    public Index<Method> methodsIndex() {
        return methodIndex;
    }
}
|
||||
@@ -1,94 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * Accumulates (time block, stack id) samples and groups them into fixed-duration
 * time blocks for the heatmap.
 */
public class SampleList {

    private static final int DEFAULT_SAMPLES_COUNT = 10_000_000;

    /** Width of one time block in milliseconds. */
    private final long blockDurationMs;

    // highest 32 bits for time block index, lowest 32 bits for stack id
    private long[] data = new long[DEFAULT_SAMPLES_COUNT];

    private long initialTime = 0;
    private int recordsCount = 0;

    public SampleList(long blockDurationMs) {
        this.blockDurationMs = blockDurationMs;
    }

    /**
     * Records one sample. The first call establishes the time origin and the
     * sample lands in block 0.
     * NOTE(review): a genuine timestamp of 0 would be mistaken for "origin not
     * set" — callers are expected to pass wall-clock millis.
     */
    public void add(int stackId, long timeMs) {
        if (initialTime == 0) {
            initialTime = timeMs;
            data[recordsCount++] = stackId; // block index 0 implied
            return;
        }
        if (recordsCount >= data.length) {
            data = Arrays.copyOf(data, data.length * 3 / 2);
        }

        int currentTimeBlock = (int) ((timeMs - initialTime) / blockDurationMs);
        data[recordsCount++] = (long) currentTimeBlock << 32 | stackId;
    }

    /**
     * Sorts samples by (block, stack id) and splits them into per-block runs.
     * Stack ids are converted from 1-based to 0-based in the result.
     *
     * @return per-block sample counts and the sorted 0-based stack ids
     */
    public Result samples() {
        if (recordsCount == 0) {
            // Guard added: the code below read data[0] unconditionally and threw
            // ArrayIndexOutOfBoundsException when no samples were recorded.
            return new Result(new int[0], new int[0]);
        }

        Arrays.sort(data, 0, recordsCount);

        int firstBlockId = (int) (data[0] >> 32);
        int lastBlockId = (int) (data[recordsCount - 1] >> 32);

        int blocksCount = lastBlockId - firstBlockId + 1;

        int[] blockSizes = new int[blocksCount];
        int[] stackIds = new int[recordsCount];

        int stackIdsPos = 0;
        int currentBlockIndex = 0;
        int currentBlockSize = 0;
        int currentBlockId = firstBlockId;

        outer:
        while (stackIdsPos < stackIds.length) {
            long currentData = data[stackIdsPos];
            int blockId = (int) (currentData >> 32);
            // Close finished blocks (empty ones included) until we reach this sample's block.
            while (currentBlockId != blockId) {
                blockSizes[currentBlockIndex++] = currentBlockSize;
                currentBlockSize = 0;
                currentBlockId++;
                if (currentBlockId > lastBlockId) {
                    break outer;
                }
            }

            currentBlockSize++;
            stackIds[stackIdsPos++] = (int) (currentData & 0xFFFFFFFFL) - 1;
        }

        // Close the final block unless the loop already ran past it.
        if (currentBlockId <= lastBlockId) {
            blockSizes[currentBlockIndex] = currentBlockSize;
        }

        return new Result(blockSizes, stackIds);
    }

    public int getRecordsCount() {
        return recordsCount;
    }

    /** Value object pairing per-block sizes with the flattened stack ids. */
    public static class Result {
        public final int[] blockSizes;
        public final int[] stackIds;

        public Result(int[] blockSizes, int[] stackIds) {
            this.blockSizes = blockSizes;
            this.stackIds = stackIds;
        }
    }

}
|
||||
@@ -1,195 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * Interns int[] stack traces, assigning stable 1-based ids, backed by an
 * open-addressed hash table. NOTE(review): the hash table mask assumes
 * power-of-two capacities — confirm callers never pass другой initialCapacity.
 */
public class StackStorage {

    protected static final int INITIAL_CAPACITY = 16 * 1024;

    private int size;

    // highest 32 bits: 0-based index into `values`; lowest 32 bits: stack hash
    private long[] meta;

    // stacks in insertion order
    private int[][] values;

    public StackStorage() {
        this(INITIAL_CAPACITY);
    }

    public StackStorage(int initialCapacity) {
        meta = new long[initialCapacity * 2];
        values = new int[initialCapacity][];
    }

    /** Returns the stack for a 1-based id previously returned by index(). */
    public int[] get(int id) {
        return values[id - 1];
    }

    /**
     * Interns the first {@code stackSize} elements of {@code stack} and returns
     * its 1-based id (existing id if the stack was seen before).
     */
    public int index(int[] stack, int stackSize) {
        int mask = meta.length - 1;
        int hashCode = murmur(stack, stackSize);
        int i = hashCode & mask;
        while (true) {
            long currentMeta = meta[i];
            if (currentMeta == 0) {
                break;
            }

            int hash = (int) currentMeta;
            if (hash == hashCode) {
                int index = (int) (currentMeta >>> 32);
                int[] value = values[index];
                if (equals(value, stack, stackSize)) {
                    return index + 1;
                }
            }

            i = (i + 1) & mask;
        }

        values[size] = Arrays.copyOf(stack, stackSize);
        meta[i] = (long) size << 32 | (hashCode & 0xFFFFFFFFL);
        size++;

        if (size * 2 > values.length) {
            resize(values.length * 2);
        }

        return size;
    }

    /**
     * Interns (prototype + append) — the stored stack is prototype with one extra
     * element — and returns its 1-based id. The combined array is only allocated
     * when the stack is actually new.
     */
    public int indexWithPrototype(int[] prototype, int append) {
        int mask = meta.length - 1;
        int hashCode = murmurWithExtra(prototype, append);
        int i = hashCode & mask;
        while (true) {
            long currentMeta = meta[i];
            if (currentMeta == 0) {
                break;
            }

            int hash = (int) currentMeta;
            if (hash == hashCode) {
                // BUGFIX: meta stores a 0-based index (see index() and both insert
                // paths), but this lookup read values[index - 1] and returned
                // `index` — wrong stack, and AIOOBE for the first stored entry.
                int index = (int) (currentMeta >>> 32);
                int[] value = values[index];
                if (equalsWithExtra(value, prototype, append)) {
                    return index + 1;
                }
            }

            i = (i + 1) & mask;
        }

        int[] stack = Arrays.copyOf(prototype, prototype.length + 1);
        stack[prototype.length] = append;
        values[size] = stack;
        meta[i] = (long) size << 32 | (hashCode & 0xFFFFFFFFL);
        size++;

        if (size * 2 > values.length) {
            resize(values.length * 2);
        }

        return size;
    }

    /** All interned stacks in id order (index 0 holds id 1). */
    public int[][] orderedTraces() {
        return Arrays.copyOf(values, size);
    }

    // Rehashes all meta entries into a table twice the new values capacity.
    protected void resize(int newCapacity) {
        long[] newMeta = new long[newCapacity * 2];
        int mask = newMeta.length - 1;

        for (long m : meta) {
            if (m != 0) {
                int hashCode = (int) m;
                for (int j = hashCode & mask; ; j = (j + 1) & mask) {
                    if (newMeta[j] == 0) {
                        newMeta[j] = m;
                        break;
                    }
                }
            }
        }

        meta = newMeta;
        values = Arrays.copyOf(values, newCapacity);
    }

    // Compares a stored (trimmed) stack with the first bSize elements of b.
    private boolean equals(int[] a, int[] b, int bSize) {
        if (a.length != bSize) {
            return false;
        }
        for (int i = 0; i < bSize; i++) {
            if (a[i] != b[i]) {
                return false;
            }
        }
        return true;
    }

    // Compares a stored stack with (b + extra).
    private boolean equalsWithExtra(int[] a, int[] b, int extra) {
        if (a.length != b.length + 1) {
            return false;
        }
        for (int i = 0; i < b.length; i++) {
            if (a[i] != b[i]) {
                return false;
            }
        }
        return a[b.length] == extra;
    }

    // MurmurHash2 over the first `size` elements.
    private static int murmur(int[] data, int size) {
        int m = 0x5bd1e995;
        // BUGFIX: seed with the logical size, not the backing buffer's length;
        // otherwise the same stack passed in a larger scratch buffer hashed
        // differently and was stored twice. murmurWithExtra already seeds with
        // the final logical length (data.length + 1), so this makes both agree.
        int h = 0x9747b28c ^ size;

        for (int i = 0; i < size; i++) {
            int k = data[i];
            k *= m;
            k ^= k >>> 24;
            k *= m;
            h *= m;
            h ^= k;
        }

        h ^= h >>> 13;
        h *= m;
        h ^= h >>> 15;

        return h;
    }

    // MurmurHash2 over data followed by one extra element.
    private static int murmurWithExtra(int[] data, int extra) {
        int m = 0x5bd1e995;
        int h = 0x9747b28c ^ (data.length + 1);

        for (int k : data) {
            k *= m;
            k ^= k >>> 24;
            k *= m;
            h *= m;
            h ^= k;
        }

        int k = extra * m;
        k ^= k >>> 24;
        k *= m;
        h *= m;
        h ^= k;

        h ^= h >>> 13;
        h *= m;
        h ^= h >>> 15;

        return h;
    }
}
|
||||
@@ -1,60 +0,0 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * Maps the busiest tree nodes (largest child counts) to short synonym ids in
 * [0, 61*61); all other nodes are shifted past the synonym range. Both arrays
 * are borrowed from LzNodeTree and mutated in place.
 */
public class SynonymTable {

    private final long[] synonyms;
    private final int[] childrenCountOrNodeSynonym;
    private final int nodesCount;

    private int synonymsCount;

    public SynonymTable(long[] synonyms, int[] childrenCount, int nodesCount) {
        this.synonyms = synonyms;
        this.childrenCountOrNodeSynonym = childrenCount;
        this.nodesCount = nodesCount;
    }

    /** Ranks nodes by child count and fills the node-to-synonym mapping. */
    public void calculateSynonyms() {
        // Pack (childrenCount << 32 | nodeId); counts are stored negated, so an
        // ascending sort puts the nodes with the most children first.
        for (int node = 0; node < nodesCount; node++) {
            synonyms[node] = (long) childrenCountOrNodeSynonym[node] << 32 | node;
        }

        Arrays.sort(synonyms, 0, nodesCount);

        // At most 61*61 two-character synonyms exist.
        synonymsCount = Math.min(61 * 61, nodesCount);

        // Default mapping shifts every node id past the synonym range...
        int[] nodeSynonyms = childrenCountOrNodeSynonym;
        for (int node = 0; node < nodesCount; node++) {
            nodeSynonyms[node] = synonymsCount + node;
        }
        // ...then the top-ranked nodes are remapped to their rank.
        for (int rank = 0; rank < synonymsCount; rank++) {
            nodeSynonyms[(int) (synonyms[rank] & 0xFFFFFFFFL)] = rank;
        }
    }

    public int synonymsCount() {
        return synonymsCount;
    }

    /** Shifted node id of the synonym at the given rank. */
    public int synonymAt(int synonymIndex) {
        int nodeId = (int) (synonyms[synonymIndex] & 0xFFFFFFFFL);
        return nodeId + synonymsCount;
    }

    public int nodeIdOrSynonym(int node) {
        return childrenCountOrNodeSynonym[node];
    }

    /** Zeroes the shared counter array for reuse and hands it back. */
    public int[] reset() {
        int[] counters = childrenCountOrNodeSynonym;
        Arrays.fill(counters, 0, nodesCount, 0);
        return counters;
    }
}
|
||||
@@ -1,6 +1,17 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.jfr;
|
||||
|
||||
@@ -1,12 +1,21 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
* Copyright 2020 Andrei Pangin
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package one.jfr;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
* Fast and compact long->Object map.
|
||||
*/
|
||||
@@ -18,22 +27,8 @@ public class Dictionary<T> {
|
||||
private int size;
|
||||
|
||||
public Dictionary() {
|
||||
this(INITIAL_CAPACITY);
|
||||
}
|
||||
|
||||
public Dictionary(int initialCapacity) {
|
||||
this.keys = new long[initialCapacity];
|
||||
this.values = new Object[initialCapacity];
|
||||
}
|
||||
|
||||
public void clear() {
|
||||
Arrays.fill(keys, 0);
|
||||
Arrays.fill(values, null);
|
||||
size = 0;
|
||||
}
|
||||
|
||||
public int size() {
|
||||
return size;
|
||||
this.keys = new long[INITIAL_CAPACITY];
|
||||
this.values = new Object[INITIAL_CAPACITY];
|
||||
}
|
||||
|
||||
public void put(long key, T value) {
|
||||
@@ -41,21 +36,17 @@ public class Dictionary<T> {
|
||||
throw new IllegalArgumentException("Zero key not allowed");
|
||||
}
|
||||
|
||||
if (++size * 2 > keys.length) {
|
||||
resize(keys.length * 2);
|
||||
}
|
||||
|
||||
int mask = keys.length - 1;
|
||||
int i = hashCode(key) & mask;
|
||||
while (keys[i] != 0) {
|
||||
if (keys[i] == key) {
|
||||
values[i] = value;
|
||||
return;
|
||||
}
|
||||
while (keys[i] != 0 && keys[i] != key) {
|
||||
i = (i + 1) & mask;
|
||||
}
|
||||
keys[i] = key;
|
||||
values[i] = value;
|
||||
|
||||
if (++size * 2 > keys.length) {
|
||||
resize(keys.length * 2);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@@ -78,8 +69,9 @@ public class Dictionary<T> {
|
||||
}
|
||||
|
||||
public int preallocate(int count) {
|
||||
if (count * 2 > keys.length) {
|
||||
resize(Integer.highestOneBit(count * 4 - 1));
|
||||
int newSize = size + count;
|
||||
if (newSize * 2 > keys.length) {
|
||||
resize(Integer.highestOneBit(newSize * 4 - 1));
|
||||
}
|
||||
return count;
|
||||
}
|
||||
@@ -106,7 +98,6 @@ public class Dictionary<T> {
|
||||
}
|
||||
|
||||
private static int hashCode(long key) {
|
||||
key *= 0xc6a4a7935bd1e995L;
|
||||
return (int) (key ^ (key >>> 32));
|
||||
}
|
||||
|
||||
|
||||