Compare commits
459 Commits
launcher-w
...
f6ca3c1ff8
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6ca3c1ff8 | ||
|
|
86adc1605a | ||
|
|
804df3ac8e | ||
|
|
8aab346c3b | ||
|
|
7bd911a007 | ||
|
|
2df2733d1d | ||
|
|
4d5441f2cd | ||
|
|
cc9e91bd8f | ||
|
|
e899de6a9c | ||
|
|
fbc3942095 | ||
|
|
6afb9572c1 | ||
|
|
f763e195ee | ||
|
|
f1b87ead07 | ||
|
|
4dda6c40af | ||
|
|
264b8ab5da | ||
|
|
c383a35ff4 | ||
|
|
82ae80a660 | ||
|
|
7e92b5cdac | ||
|
|
fe69e4fab2 | ||
|
|
d94581c24c | ||
|
|
f3c31942fb | ||
|
|
a246ced814 | ||
|
|
8d653dd5e0 | ||
|
|
cc0eab1789 | ||
|
|
842b612e08 | ||
|
|
ff4336d136 | ||
|
|
e1dd4c05f6 | ||
|
|
174dc31d88 | ||
|
|
dbd9fc7520 | ||
|
|
dc69cf4b80 | ||
|
|
abc8b7f493 | ||
|
|
4ea8e5bbb6 | ||
|
|
71ad47a46e | ||
|
|
0023021ddf | ||
|
|
444d0e6353 | ||
|
|
7e2ed0e77e | ||
|
|
68244fbf6f | ||
|
|
31042f13bc | ||
|
|
a3c6d92d39 | ||
|
|
036c87e50d | ||
|
|
15b1161f57 | ||
|
|
b02434bd9d | ||
|
|
9c293283f2 | ||
|
|
3aba5ee521 | ||
|
|
078935591f | ||
|
|
dc88d3f756 | ||
|
|
a071e8a2f8 | ||
|
|
6e6acc1769 | ||
|
|
709a777393 | ||
|
|
b9d6843ae5 | ||
|
|
3722d05ba0 | ||
|
|
872be63220 | ||
|
|
a89d7ddeba | ||
|
|
f789c4f748 | ||
|
|
d43d328b58 | ||
|
|
037c09906d | ||
|
|
f352361814 | ||
|
|
19b22efeff | ||
|
|
16fdebf78c | ||
|
|
0b73f655a8 | ||
|
|
cf4739a61b | ||
|
|
757bf8edd3 | ||
|
|
d0d16240d4 | ||
|
|
fc9b5c85cf | ||
|
|
436d5b5066 | ||
|
|
4663784b98 | ||
|
|
d2172a6382 | ||
|
|
93b6ae376d | ||
|
|
ee4ac6e888 | ||
|
|
865e8b91f8 | ||
|
|
2a4f329cba | ||
|
|
9c425ca74f | ||
|
|
d871819848 | ||
|
|
bf84fadb3c | ||
|
|
fde780e275 | ||
|
|
6e04336375 | ||
|
|
a77d091e08 | ||
|
|
999f0c7ae3 | ||
|
|
cdaf6e76ba | ||
|
|
3a493bedc4 | ||
|
|
7b24ad89b6 | ||
|
|
5bf0e311c2 | ||
|
|
8772214f7e | ||
|
|
3bb1e72d09 | ||
|
|
ea0b34b578 | ||
|
|
e92eb45812 | ||
|
|
d304fd5d75 | ||
|
|
61a676f87f | ||
|
|
b855e0c2c4 | ||
|
|
763616aa17 | ||
|
|
a25e5194bf | ||
|
|
ff24f1220c | ||
|
|
124eca439e | ||
|
|
4bcfe9ee7b | ||
|
|
0b7ee6d830 | ||
|
|
01325ea87c | ||
|
|
78172b7cb0 | ||
|
|
fd269e6450 | ||
|
|
61d48a6b43 | ||
|
|
c6c2fc1497 | ||
|
|
585054661f | ||
|
|
0cb40bee11 | ||
|
|
8c851ddad2 | ||
|
|
8fa4fd0b78 | ||
|
|
9611d55567 | ||
|
|
eb4d126a2d | ||
|
|
85ae06b177 | ||
|
|
872631f82b | ||
|
|
7482988021 | ||
|
|
e647076de5 | ||
|
|
c478490ce9 | ||
|
|
6e10742be1 | ||
|
|
49e56704f9 | ||
|
|
79d9058b18 | ||
|
|
9674d20873 | ||
|
|
538f3a2e48 | ||
|
|
e35113a647 | ||
|
|
8f7e4e19cc | ||
|
|
88b7ba3838 | ||
|
|
7a86354d77 | ||
|
|
fa5ada6747 | ||
|
|
fc2a9b928c | ||
|
|
5aee9cdb03 | ||
|
|
dd0d233499 | ||
|
|
fb673227c7 | ||
|
|
1a15a0e86a | ||
|
|
ea095462ca | ||
|
|
3634cdc1ac | ||
|
|
3e663759da | ||
|
|
97c35ac96c | ||
|
|
6453ccca43 | ||
|
|
f9b78102ce | ||
|
|
861f4f4f63 | ||
|
|
0eba17edd0 | ||
|
|
bfa821b6ce | ||
|
|
e3f646a1d9 | ||
|
|
7338c30d88 | ||
|
|
d97a7d3343 | ||
|
|
07b3e747d1 | ||
|
|
70a13bcd03 | ||
|
|
6f2a9b80f8 | ||
|
|
145fc2dd28 | ||
|
|
6fc51db16e | ||
|
|
49ae9cfe7f | ||
|
|
fcf2734f56 | ||
|
|
2c188fe490 | ||
|
|
f6e850c5f5 | ||
|
|
62307a2418 | ||
|
|
b30f5f1da1 | ||
|
|
af8fabe3db | ||
|
|
2e0e3ab792 | ||
|
|
9f687fb07e | ||
|
|
e052d51323 | ||
|
|
6ebadb87cf | ||
|
|
5454c9bf7f | ||
|
|
1eb40f446c | ||
|
|
df063a6f6e | ||
|
|
d651a7a326 | ||
|
|
512f7b88cf | ||
|
|
ead97aca72 | ||
|
|
6c61fb6c83 | ||
|
|
02a8fdb9f5 | ||
|
|
0e551b0fef | ||
|
|
9b5e3f330a | ||
|
|
89ead820f0 | ||
|
|
b320af7ad3 | ||
|
|
a9e8c8d558 | ||
|
|
ca58e81005 | ||
|
|
a035e3e4d1 | ||
|
|
f62a53ed3d | ||
|
|
8e64342485 | ||
|
|
b6d442b542 | ||
|
|
3237a0ce9b | ||
|
|
c612cd70e9 | ||
|
|
f461a06d23 | ||
|
|
8593be1600 | ||
|
|
88c46da067 | ||
|
|
fc6ffb3d6e | ||
|
|
fd157a8a42 | ||
|
|
2a0bae6e06 | ||
|
|
49b7320521 | ||
|
|
cdedd3fb22 | ||
|
|
da812fca7a | ||
|
|
5930966a92 | ||
|
|
7737df342d | ||
|
|
843f1d9f3e | ||
|
|
733f2a513c | ||
|
|
9824786981 | ||
|
|
5fffdb1eaa | ||
|
|
7bf8528f75 | ||
|
|
80ae8aed19 | ||
|
|
1c1a14c1ec | ||
|
|
83e9bdd9bd | ||
|
|
22ce08f5ef | ||
|
|
7c4385b0b1 | ||
|
|
461a3c1b93 | ||
|
|
5b178bfc5c | ||
|
|
520b897dce | ||
|
|
a70f25e00f | ||
|
|
f79729167a | ||
|
|
f627b3157b | ||
|
|
85fefd2800 | ||
|
|
5091304efd | ||
|
|
c42bf7ad9d | ||
|
|
2b8dffff27 | ||
|
|
09ad6c1663 | ||
|
|
40fd71a8a0 | ||
|
|
557f4adecb | ||
|
|
de54c536dc | ||
|
|
c74107e53f | ||
|
|
b3968f5e38 | ||
|
|
29dd537907 | ||
|
|
0330a6e333 | ||
|
|
9b44c2e99d | ||
|
|
5b4450b85c | ||
|
|
82d13772a5 | ||
|
|
bbca9f1817 | ||
|
|
981619680e | ||
|
|
2b556680dc | ||
|
|
b3f58429f5 | ||
|
|
2844e6c5c1 | ||
|
|
0e1008531b | ||
|
|
19ad42cd23 | ||
|
|
f76833a2c0 | ||
|
|
4b1df29aab | ||
|
|
795da942f7 | ||
|
|
bedffcb080 | ||
|
|
660ffcd5c6 | ||
|
|
60e79e364a | ||
|
|
d89ab7a16c | ||
|
|
d042e0a8db | ||
|
|
3256fde4c1 | ||
|
|
3bbab49e3c | ||
|
|
ed57317281 | ||
|
|
c17de4c220 | ||
|
|
3a9252c677 | ||
|
|
fd8ba8b9ee | ||
|
|
5fe1c47ee3 | ||
|
|
ff203f391a | ||
|
|
ba93f813a9 | ||
|
|
7c3aa59ceb | ||
|
|
25ddfe056b | ||
|
|
7d4157b1c5 | ||
|
|
b3907b43ed | ||
|
|
cf39c3ad9c | ||
|
|
73f0486946 | ||
|
|
f2197dc400 | ||
|
|
8c15cbac99 | ||
|
|
d2c85c18c6 | ||
|
|
b5b41dcaaa | ||
|
|
f5fd5b0863 | ||
|
|
dbcd94fcd6 | ||
|
|
570ead9c13 | ||
|
|
471267bea4 | ||
|
|
67076816c1 | ||
|
|
0c72a8d3e9 | ||
|
|
fdaf1957c0 | ||
|
|
047a6dea1f | ||
|
|
14c7e819b2 | ||
|
|
fa417c85c8 | ||
|
|
6d786b7401 | ||
|
|
387dee13b8 | ||
|
|
164eac4dbd | ||
|
|
7207fc8775 | ||
|
|
b034e4c314 | ||
|
|
39f43006a1 | ||
|
|
b9f3456f89 | ||
|
|
5ce2c34d3e | ||
|
|
e359d161ba | ||
|
|
7b2d1d9c94 | ||
|
|
5030fe5faa | ||
|
|
bc80518125 | ||
|
|
87b7b42ec6 | ||
|
|
80a6e722b6 | ||
|
|
e66a4ab6d6 | ||
|
|
f071146d11 | ||
|
|
ea969d10f9 | ||
|
|
85c72f839f | ||
|
|
6979a9eff2 | ||
|
|
ebd889466b | ||
|
|
2dba71fcf9 | ||
|
|
94a6f9ca61 | ||
|
|
87417ec418 | ||
|
|
41e205b2b2 | ||
|
|
6178c17d92 | ||
|
|
177de58865 | ||
|
|
49cca40e47 | ||
|
|
ef64ae2f2d | ||
|
|
5a853fa26b | ||
|
|
ee75d80622 | ||
|
|
18e3b39db9 | ||
|
|
24007027b3 | ||
|
|
be1380022d | ||
|
|
6761587bb3 | ||
|
|
788e44dffc | ||
|
|
7152ba0477 | ||
|
|
eabbd2f796 | ||
|
|
a78793bed0 | ||
|
|
fe1bc66d4b | ||
|
|
21707d4d7e | ||
|
|
b0dde79fc0 | ||
|
|
5ea64a15c3 | ||
|
|
1d64e18123 | ||
|
|
f5a7c63d0b | ||
|
|
626366b5ad | ||
|
|
21bc5948cf | ||
|
|
15609cdfa0 | ||
|
|
e27198e324 | ||
|
|
ceb1a3126f | ||
|
|
cc76f05e60 | ||
|
|
e6e0494926 | ||
|
|
f71c31af7b | ||
|
|
2d764ccb63 | ||
|
|
75aefa36c9 | ||
|
|
c1ed9b3169 | ||
|
|
cfb9fa45c3 | ||
|
|
e6407ee349 | ||
|
|
3beae04e9d | ||
|
|
bd439d8a04 | ||
|
|
cede318ece | ||
|
|
ff21b118b2 | ||
|
|
3d950aed89 | ||
|
|
fc24d60d1b | ||
|
|
518a7528b4 | ||
|
|
d1498a6c7f | ||
|
|
10fa9ee313 | ||
|
|
29ee888a42 | ||
|
|
6c0aff487b | ||
|
|
b55cb7c973 | ||
|
|
77a5339b2c | ||
|
|
a60f310dae | ||
|
|
e98e76fbba | ||
|
|
944dd405d9 | ||
|
|
0e3a7d7b7a | ||
|
|
fa937dbbc3 | ||
|
|
1b7bb4adca | ||
|
|
21522393a2 | ||
|
|
f334c51b7e | ||
|
|
87c5436731 | ||
|
|
9afed85559 | ||
|
|
7691403e76 | ||
|
|
977a2f446b | ||
|
|
5a3636d5df | ||
|
|
75c71bfbd9 | ||
|
|
ec83ae6d7a | ||
|
|
37829cc30c | ||
|
|
2feb6f6e0e | ||
|
|
9cf87d8834 | ||
|
|
a2b7eecfeb | ||
|
|
3044fb1931 | ||
|
|
8da7831bb1 | ||
|
|
217d25ab9c | ||
|
|
7e6ceb7d5b | ||
|
|
c94fb08e6c | ||
|
|
c578c17527 | ||
|
|
49aadce877 | ||
|
|
78f78cf681 | ||
|
|
5ebc82dd04 | ||
|
|
5054a6b601 | ||
|
|
f0ceda6356 | ||
|
|
16f15de48e | ||
|
|
d7ab6428e9 | ||
|
|
b7d66a5223 | ||
|
|
7e5706121d | ||
|
|
5fe149a639 | ||
|
|
9c37bb0f62 | ||
|
|
04cc4759d4 | ||
|
|
bce178e3a5 | ||
|
|
99df42dcbe | ||
|
|
3ccae7c9b5 | ||
|
|
34cee4b4fd | ||
|
|
f8e887eb9d | ||
|
|
deb1880b5d | ||
|
|
6993349079 | ||
|
|
fc440b0db9 | ||
|
|
25abe28782 | ||
|
|
fcdc5bd200 | ||
|
|
b174febb3e | ||
|
|
ad141d1c15 | ||
|
|
389a710276 | ||
|
|
6682ad6fb0 | ||
|
|
3cf733d589 | ||
|
|
603db84cde | ||
|
|
f863502c8e | ||
|
|
083d85f80a | ||
|
|
e6a319e1b6 | ||
|
|
36168a1f24 | ||
|
|
0cd9726a81 | ||
|
|
6f2c6cc666 | ||
|
|
6c32ce9701 | ||
|
|
adecac7907 | ||
|
|
870833bcc7 | ||
|
|
116504c9f7 | ||
|
|
f59894d912 | ||
|
|
0432101955 | ||
|
|
08f29f82ff | ||
|
|
6bd9dfaf8e | ||
|
|
ecd8f3ac96 | ||
|
|
59d2defaa6 | ||
|
|
62dca46d20 | ||
|
|
5a90a82231 | ||
|
|
da3f5f329c | ||
|
|
63009d09b6 | ||
|
|
8c28cd8731 | ||
|
|
80b59a0101 | ||
|
|
74ffc675be | ||
|
|
4ee2a5df39 | ||
|
|
445bafb861 | ||
|
|
3992d6a25d | ||
|
|
4208d5c31c | ||
|
|
e0885e348e | ||
|
|
061f03d79f | ||
|
|
37ff942ae6 | ||
|
|
1291692c02 | ||
|
|
2d043110ab | ||
|
|
1ed9df96d9 | ||
|
|
9db26318de | ||
|
|
f43a6f8e1c | ||
|
|
f53bfd4a58 | ||
|
|
a386afa3d8 | ||
|
|
e50446081a | ||
|
|
5a093b64fc | ||
|
|
82ebedad09 | ||
|
|
d66717cbcf | ||
|
|
2833cabe67 | ||
|
|
0ed06fa7f1 | ||
|
|
f66056a2c5 | ||
|
|
69c9ddacdd | ||
|
|
6d7f73aec1 | ||
|
|
c299592f15 | ||
|
|
e6a1581ca3 | ||
|
|
69e04795b9 | ||
|
|
c122fde475 | ||
|
|
25fa02e103 | ||
|
|
b9a3737531 | ||
|
|
d92b893826 | ||
|
|
b8025a2449 | ||
|
|
02670ccde5 | ||
|
|
76311145c8 | ||
|
|
179b0b1285 | ||
|
|
40118962ad | ||
|
|
321a712ff8 | ||
|
|
78123a85a7 | ||
|
|
7f712bb4e9 | ||
|
|
09f0a8fef7 | ||
|
|
6207d5dc86 | ||
|
|
c9cb0c1cb2 | ||
|
|
bd095f13ec | ||
|
|
174e295ffb | ||
|
|
76c2024e83 | ||
|
|
e38cd32e70 | ||
|
|
c25345454a | ||
|
|
4e9f0961c0 | ||
|
|
0d90f1c817 | ||
|
|
df4af83dc2 | ||
|
|
f4528dec7c | ||
|
|
30f4f321b8 | ||
|
|
28fbb132c5 | ||
|
|
9660e15b1e |
@@ -3,24 +3,82 @@
|
||||
<head>
|
||||
<meta charset='utf-8'>
|
||||
<style>
|
||||
body {margin: 0; padding: 10px 10px 22px 10px; background-color: #ffffff}
|
||||
:root {--bg: #ffffff; --fg: #000000; --hl-bg: #ffffe0; --hl-border: #ffc000; --link: #0366d6; --legend-bg: #ffffe0; --legend-border: #666666}
|
||||
:root.dark {--bg: #1e1e1e; --fg: #cccccc; --hl-bg: #3a3a00; --hl-border: #8a7000; --link: #58a6ff; --legend-bg: #333333; --legend-border: #888888}
|
||||
body {margin: 0; padding: 10px 10px 22px 10px; background-color: var(--bg); color: var(--fg)}
|
||||
h1 {margin: 5px 0 0 0; font-size: 18px; font-weight: normal; text-align: center}
|
||||
header {margin: -24px 0 5px 0; line-height: 24px}
|
||||
button {font: 12px sans-serif; cursor: pointer}
|
||||
p {position: fixed; bottom: 0; margin: 0; padding: 2px 3px 2px 3px; outline: 1px solid #ffc000; display: none; overflow: hidden; white-space: nowrap; background-color: #ffffe0}
|
||||
a {color: #0366d6}
|
||||
#hl {position: absolute; display: none; overflow: hidden; white-space: nowrap; pointer-events: none; background-color: #ffffe0; outline: 1px solid #ffc000; height: 15px}
|
||||
header {margin: -22px 0 6px 0}
|
||||
button {border: none; background: none; width: 24px; height: 24px; cursor: pointer; margin: 0; padding: 2px 0 0 0; text-align: center}
|
||||
button:hover {background-color: var(--hl-bg); outline: 1px solid var(--hl-border); border-radius: 4px}
|
||||
dl {margin: 0 4px 8px 4px}
|
||||
dt {margin: 1px; padding: 2px 0; font-weight: bold}
|
||||
dd {margin: 1px; padding: 2px 4px}
|
||||
dl.frames {float: left; width: 160px}
|
||||
dl.frames > dd {color: #000000}
|
||||
dl.hotkeys {clear: left; border-top: 1px solid var(--legend-border)}
|
||||
dl.hotkeys > dt {float: left; clear: left; width: 158px; margin-right: 4px; text-align: right}
|
||||
dl.hotkeys > dd {float: left}
|
||||
p {position: fixed; bottom: 0; margin: 0; padding: 2px 3px 2px 3px; outline: 1px solid var(--hl-border); display: none; overflow: hidden; white-space: nowrap; background-color: var(--hl-bg); color: var(--fg)}
|
||||
a {color: var(--link)}
|
||||
#legend {padding: 4px; border-radius: 4px; background: var(--legend-bg); border: 1px solid var(--legend-border); display: none}
|
||||
#hl {position: absolute; display: none; overflow: hidden; white-space: nowrap; pointer-events: none; background-color: var(--hl-bg); outline: 1px solid var(--hl-border); height: 15px}
|
||||
#hl span {padding: 0 3px 0 3px}
|
||||
#status {left: 0}
|
||||
#match {right: 0}
|
||||
#reset {cursor: pointer}
|
||||
#canvas {width: 100%; height: 576px}
|
||||
</style>
|
||||
<script>
|
||||
{
|
||||
let theme;
|
||||
try { theme = localStorage.getItem('flame-theme'); } catch (ignored) {}
|
||||
if (theme ? theme === 'dark' : matchMedia('(prefers-color-scheme: dark)').matches) {
|
||||
document.documentElement.classList.add('dark');
|
||||
}
|
||||
}
|
||||
</script>
|
||||
</head>
|
||||
<body style='font: 12px Verdana, sans-serif'>
|
||||
<h1>CPU profile</h1>
|
||||
<header style='text-align: left'><button id='reverse' title='Reverse'>🔻</button> <button id='search' title='Search'>🔍</button></header>
|
||||
<header style='text-align: right'>Produced by <a href='https://github.com/async-profiler/async-profiler'>async-profiler</a></header>
|
||||
<header style='float: left'>
|
||||
<button id='inverted' title='Invert (I)'><svg xmlns='http://www.w3.org/2000/svg' width='20' height='20' viewBox='0 0 392 392'><path d='M196,36 L316,156 L76,156 Z' fill='#004d80'/><path d='M196,356 L76,236 L316,236 Z' fill='#004d80'/><path d='M196,54 L298,156 L94,156 Z' fill='#ff8d40'/><path d='M196,338 L94,236 L298,236 Z' fill='#40b2ff'/><rect x='94' y='188' width='204' height='16' fill='#004d80'/></svg></button>
|
||||
<button id='search' title='Search (Ctrl+F)'><svg xmlns='http://www.w3.org/2000/svg' width='20' height='20' viewBox='-39.3 -39.3 471.1 471.1'><circle cx='147.7' cy='147.8' r='125.9' fill='#fff'/><path fill='#40b2ff' d='M370.7 348.7c0 1.4-1.6 6.3-7.2 12.3-6.2 6.7-12.5 9.8-14.7 9.8h-.1c-19.5-1.6-62-43.2-109.6-106.8 9.2-7.2 17.5-15.5 24.6-24.6 63.6 47.6 105.2 90.2 106.8 109.6z'/><path fill='#ff8d40' d='M208.7 86.9l-14.5 14.5c-17.1 17.1-46.5 5-46.5-19.3V61.6c-49 0-88.4 40.8-86.1 90.2 2 43.9 38.1 80 82 82 49.5 2.3 90.2-37.2 90.2-86.1 0-23.7-9.6-45.2-25.1-60.8z'/><path fill='#004d80' d='M276.1 221c12.3-21.5 19.5-46.5 19.5-73.2C295.6 66.3 229.2.1 147.7.1S0 66.3 0 147.9s66.3 147.7 147.7 147.7c26.6 0 51.5-7.1 73.2-19.5 39.8 53.3 91.9 113.5 126.1 116.4 12.3.5 22.9-6.7 32.8-16.7 5.2-5.6 13.8-16.9 12.8-28.8-2.9-34.1-63.1-86.2-116.4-126.1zM147.7 273.8c-69.5 0-125.9-56.5-125.9-125.9S78.3 21.9 147.7 21.9 273.6 78.4 273.6 147.8s-56.4 126-125.9 126zm215.9 87.2c-6.2 6.7-12.4 9.8-14.7 9.8h-.1c-19.5-1.6-62-43.2-109.6-106.8 9.2-7.2 17.5-15.5 24.6-24.6 63.6 47.6 105.2 90.2 106.8 109.6 0 1.4-1.6 6.3-7.2 12.4z'/></svg></button>
|
||||
<button id='darkmode' title='Toggle dark mode (D)'><svg xmlns='http://www.w3.org/2000/svg' width='20' height='20' viewBox='0 0 20 20'><path d='M10 4a6 6 0 0 1 0 12z' fill='#ff8d40'/><path d='M10 4a6 6 0 0 0 0 12z' fill='#ffffff'/><circle cx='10' cy='10' r='8' fill='none' stroke='#004d80'/></svg></button>
|
||||
<button id='info'><svg xmlns='http://www.w3.org/2000/svg' width='20' height='20' viewBox='0 0 20 20'><circle cx='10' cy='10' r='8' stroke='#004d80' fill='none'/><path d='M10 5.5c-1.25 0-2.25 1-2.25 2.25H9a1.25 1.25 0 0 1 2.5 0c0 .65-.55 1-1 1.2-.7.35-1.25.85-1.25 1.8V11h1.5v-.25c0-.37.29-.65.68-.83.73-.34 1.32-.87 1.32-2.17 0-1.25-1.5-2.25-2.75-2.25' fill='#ff8d40' stroke='#ff8d40' stroke-width='.6' stroke-linecap='round' stroke-linejoin='round'/><circle cx='10' cy='13.5' r='1.2' fill='#ff8d40'/></svg></button>
|
||||
</header>
|
||||
<header style='float: right'>Produced by <a href='https://github.com/async-profiler/async-profiler'>async-profiler</a></header>
|
||||
<div id='legend' style='position: absolute'>
|
||||
<dl class='frames'>
|
||||
<dt>Frame types</dt>
|
||||
<dd style='background-color: #e17d00'>Kernel</dd>
|
||||
<dd style='background-color: #e15a5a'>Native</dd>
|
||||
<dd style='background-color: #c8c83c'>C++ (VM)</dd>
|
||||
<dd style='background-color: #50e150'>Java compiled</dd>
|
||||
<dd style='background-color: #cce880'>Java compiled by C1</dd>
|
||||
<dd style='background-color: #50cccc'>Inlined</dd>
|
||||
<dd style='background-color: #b2e1b2'>Interpreted</dd>
|
||||
</dl>
|
||||
<dl class='frames'>
|
||||
<dt>Allocation profile</dt>
|
||||
<dd style='background-color: #50cccc'>Allocated class</dd>
|
||||
<dd style='background-color: #e17d00'>Allocation outside TLAB</dd>
|
||||
<dt>Lock profile</dt>
|
||||
<dd style='background-color: #50cccc'>Lock class</dd>
|
||||
<dt> </dt>
|
||||
<dt>Search</dt>
|
||||
<dd style='background-color: #ee00ee'>Matches regexp</dd>
|
||||
</dl>
|
||||
<dl class='hotkeys'>
|
||||
<dt>Click frame</dt><dd>Zoom into frame</dd>
|
||||
<dt>Ctrl/Alt+Click</dt><dd>Remove stack</dd>
|
||||
<dt>0</dt><dd>Reset zoom</dd>
|
||||
<dt>I</dt><dd>Invert graph</dd>
|
||||
<dt>Ctrl+F</dt><dd>Search</dd>
|
||||
<dt>N</dt><dd>Next match</dd>
|
||||
<dt>Shift+N</dt><dd>Previous match</dd>
|
||||
<dt>Esc</dt><dd>Cancel search</dd>
|
||||
</dl>
|
||||
</div>
|
||||
<canvas id='canvas'></canvas>
|
||||
<div id='hl'><span></span></div>
|
||||
<p id='status'></p>
|
||||
@@ -29,9 +87,12 @@
|
||||
// Copyright The async-profiler authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
'use strict';
|
||||
let root, rootLevel, px, pattern;
|
||||
let level0 = 0, left0 = 0, width0 = 0;
|
||||
let reverse = false;
|
||||
let root, px, pattern;
|
||||
let level0 = 0, left0 = 0, width0 = 0, d = 0;
|
||||
let nav = [], navIndex, matchval;
|
||||
let inverted = false;
|
||||
const U = undefined;
|
||||
const maxdiff = -1;
|
||||
const levels = Array(36);
|
||||
for (let h = 0; h < levels.length; h++) {
|
||||
levels[h] = [];
|
||||
@@ -65,10 +126,18 @@
|
||||
return '#' + (p[0] + ((p[1] * v) << 16 | (p[2] * v) << 8 | (p[3] * v))).toString(16);
|
||||
}
|
||||
|
||||
function getDiffColor(diff) {
|
||||
if (diff === U) return '#ffdd33';
|
||||
if (diff === 0) return '#e0e0e0';
|
||||
const v = Math.round(128 * (maxdiff - Math.abs(diff)) / maxdiff) + 96;
|
||||
return diff > 0 ? 'rgb(255,' + v + ',' + v + ')' : 'rgb(' + v + ',' + v + ',255)';
|
||||
}
|
||||
|
||||
function f(key, level, left, width, inln, c1, int) {
|
||||
levels[level0 = level].push({left: left0 += left, width: width0 = width || width0,
|
||||
color: getColor(palette[key & 7]), title: cpool[key >>> 3],
|
||||
details: (int ? ', int=' + int : '') + (c1 ? ', c1=' + c1 : '') + (inln ? ', inln=' + inln : '')
|
||||
levels[level0 = level].push({level, left: left0 += left, width: width0 = width || width0,
|
||||
color: maxdiff >= 0 ? getDiffColor(d) : getColor(palette[key & 7]),
|
||||
title: cpool[key >>> 3],
|
||||
details: (d ? (d > 0 ? ', +' : ', ') + d : '') + (int ? ', int=' + int : '') + (c1 ? ', c1=' + c1 : '') + (inln ? ', inln=' + inln : '')
|
||||
});
|
||||
}
|
||||
|
||||
@@ -133,19 +202,21 @@
|
||||
}
|
||||
|
||||
pattern = r ? RegExp(r) : undefined;
|
||||
const matched = render(root, rootLevel);
|
||||
document.getElementById('matchval').textContent = pct(matched, root.width) + '%';
|
||||
const matched = render(root, nav = []);
|
||||
navIndex = -1;
|
||||
document.getElementById('matchval').textContent = matchval = pct(matched, root.width) + '%';
|
||||
document.getElementById('match').style.display = r ? 'inline-block' : 'none';
|
||||
}
|
||||
|
||||
function render(newRoot, newLevel) {
|
||||
function render(newRoot, nav) {
|
||||
const bg = getComputedStyle(document.documentElement).getPropertyValue('--bg');
|
||||
|
||||
if (root) {
|
||||
c.fillStyle = '#ffffff';
|
||||
c.fillStyle = bg;
|
||||
c.fillRect(0, 0, canvasWidth, canvasHeight);
|
||||
}
|
||||
|
||||
root = newRoot || levels[0][0];
|
||||
rootLevel = newLevel || 0;
|
||||
px = canvasWidth / root.width;
|
||||
|
||||
const x0 = root.left;
|
||||
@@ -153,7 +224,7 @@
|
||||
const marked = [];
|
||||
|
||||
function mark(f) {
|
||||
return marked[f.left] >= f.width || (marked[f.left] = f.width);
|
||||
return marked[f.left] || (marked[f.left] = f);
|
||||
}
|
||||
|
||||
function totalMarked() {
|
||||
@@ -161,14 +232,16 @@
|
||||
let left = 0;
|
||||
Object.keys(marked).sort(function(a, b) { return a - b; }).forEach(function(x) {
|
||||
if (+x >= left) {
|
||||
total += marked[x];
|
||||
left = +x + marked[x];
|
||||
const m = marked[x];
|
||||
if (nav) nav.push(m);
|
||||
total += m.width;
|
||||
left = +x + m.width;
|
||||
}
|
||||
});
|
||||
return total;
|
||||
}
|
||||
|
||||
function drawFrame(f, y, alpha) {
|
||||
function drawFrame(f, y) {
|
||||
if (f.left < x1 && f.left + f.width > x0) {
|
||||
c.fillStyle = pattern && f.title.match(pattern) && mark(f) ? '#ee00ee' : f.color;
|
||||
c.fillRect((f.left - x0) * px, y, f.width * px, 15);
|
||||
@@ -180,18 +253,18 @@
|
||||
c.fillText(title, Math.max(f.left - x0, 0) * px + 3, y + 12, f.width * px - 6);
|
||||
}
|
||||
|
||||
if (alpha) {
|
||||
c.fillStyle = 'rgba(255, 255, 255, 0.5)';
|
||||
if (f.level < root.level) {
|
||||
c.fillStyle = bg + '80';
|
||||
c.fillRect((f.left - x0) * px, y, f.width * px, 15);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (let h = 0; h < levels.length; h++) {
|
||||
const y = reverse ? h * 16 : canvasHeight - (h + 1) * 16;
|
||||
const y = inverted ? h * 16 : canvasHeight - (h + 1) * 16;
|
||||
const frames = levels[h];
|
||||
for (let i = 0; i < frames.length; i++) {
|
||||
drawFrame(frames[i], y, h < rootLevel);
|
||||
drawFrame(frames[i], y);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -205,24 +278,24 @@
|
||||
}
|
||||
|
||||
canvas.onmousemove = function() {
|
||||
const h = Math.floor((reverse ? event.offsetY : (canvasHeight - event.offsetY)) / 16);
|
||||
const h = Math.floor((inverted ? event.offsetY : (canvasHeight - event.offsetY)) / 16);
|
||||
if (h >= 0 && h < levels.length) {
|
||||
const f = findFrame(levels[h], event.offsetX / px + root.left);
|
||||
if (f) {
|
||||
if (f !== root) getSelection().removeAllRanges();
|
||||
hl.style.left = (Math.max(f.left - root.left, 0) * px + canvas.offsetLeft) + 'px';
|
||||
hl.style.width = (Math.min(f.width, root.width) * px) + 'px';
|
||||
hl.style.top = ((reverse ? h * 16 : canvasHeight - (h + 1) * 16) + canvas.offsetTop) + 'px';
|
||||
hl.style.top = ((inverted ? h * 16 : canvasHeight - (h + 1) * 16) + canvas.offsetTop) + 'px';
|
||||
hl.firstChild.textContent = f.title;
|
||||
hl.style.display = 'block';
|
||||
canvas.title = f.title + '\n(' + samples(f.width) + f.details + ', ' + pct(f.width, levels[0][0].width) + '%)';
|
||||
canvas.style.cursor = 'pointer';
|
||||
canvas.onclick = function() {
|
||||
if (event.altKey && h >= rootLevel) {
|
||||
if ((event.altKey || event.ctrlKey) && h >= root.level && h > 0) {
|
||||
removeStack(f.left, f.width);
|
||||
root.width > f.width ? render(root, rootLevel) : render();
|
||||
root.width > f.width ? render(root) : render();
|
||||
} else if (f !== root) {
|
||||
render(f, h);
|
||||
render(f);
|
||||
}
|
||||
canvas.onmousemove();
|
||||
};
|
||||
@@ -246,8 +319,8 @@
|
||||
getSelection().selectAllChildren(hl);
|
||||
}
|
||||
|
||||
document.getElementById('reverse').onclick = function() {
|
||||
reverse = !reverse;
|
||||
document.getElementById('inverted').onclick = function() {
|
||||
inverted = !inverted;
|
||||
render();
|
||||
}
|
||||
|
||||
@@ -259,12 +332,51 @@
|
||||
search(false);
|
||||
}
|
||||
|
||||
window.onkeydown = function() {
|
||||
if ((event.ctrlKey || event.metaKey) && event.keyCode === 70) {
|
||||
document.getElementById('darkmode').onclick = function() {
|
||||
const theme = document.documentElement.classList.toggle('dark') ? 'dark' : 'light';
|
||||
try { localStorage.setItem('flame-theme', theme); } catch (ignored) {}
|
||||
render(root);
|
||||
}
|
||||
|
||||
const btnInfo = document.getElementById('info');
|
||||
const legend = document.getElementById('legend');
|
||||
|
||||
btnInfo.onmouseover = function() {
|
||||
legend.style.left = (btnInfo.offsetLeft + 24) + 'px';
|
||||
legend.style.top = (btnInfo.offsetTop + 24) + 'px';
|
||||
legend.style.display = 'block';
|
||||
}
|
||||
|
||||
btnInfo.onmouseout = function() {
|
||||
legend.style.display = 'none';
|
||||
}
|
||||
|
||||
window.onkeydown = function(event) {
|
||||
if ((event.ctrlKey || event.metaKey) && event.key === 'f') {
|
||||
event.preventDefault();
|
||||
search(true);
|
||||
} else if (event.keyCode === 27) {
|
||||
return false;
|
||||
} else if (event.key === 'Escape') {
|
||||
search(false);
|
||||
} else if ((event.key === 'n' || event.key === 'N') && nav.length > 0) {
|
||||
navIndex = (navIndex + (event.shiftKey ? nav.length - 1 : 1)) % nav.length;
|
||||
render(nav[navIndex]);
|
||||
document.getElementById('matchval').textContent = matchval + ' (' + (navIndex + 1) + ' of ' + nav.length + ')';
|
||||
window.scroll(0, inverted ? root.level * 16 : canvasHeight - (root.level + 1) * 16);
|
||||
canvas.onmousemove();
|
||||
return false;
|
||||
} else if (event.key === 'i') {
|
||||
canvas.onmouseout();
|
||||
document.getElementById('inverted').onclick();
|
||||
return false;
|
||||
} else if (event.key === 'd') {
|
||||
document.getElementById('darkmode').onclick();
|
||||
return false;
|
||||
} else if (event.key === '0') {
|
||||
canvas.onmouseout();
|
||||
root = levels[0][0];
|
||||
search(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -483,7 +595,7 @@ f(484,12,2,3)
|
||||
u(476)
|
||||
f(68,10,3,23)
|
||||
f(412,11,2,16)
|
||||
u(380,15)
|
||||
f(380,12,1,15)
|
||||
f(372,13,1,3)
|
||||
n(388,2)
|
||||
n(396)
|
||||
@@ -492,7 +604,7 @@ n(428,5)
|
||||
f(436,14,1,2)
|
||||
n(444)
|
||||
u(420)
|
||||
f(452,11,3,3)
|
||||
f(452,11,2,3)
|
||||
u(468)
|
||||
f(460,13,1,2)
|
||||
f(492,11,2)
|
||||
@@ -530,7 +642,7 @@ u(108)
|
||||
f(1444,15,12,50)
|
||||
f(1452,16,1,22)
|
||||
f(124,17,2,20)
|
||||
f(156,18,9,8)
|
||||
f(156,18,10,8)
|
||||
f(228,19,2,2)
|
||||
n(508)
|
||||
n(605)
|
||||
@@ -543,7 +655,7 @@ u(613)
|
||||
u(629)
|
||||
u(621)
|
||||
f(579,18,2)
|
||||
f(1460,16,3,27)
|
||||
f(1460,16,2,27)
|
||||
f(124,17,11,16)
|
||||
f(156,18,13,3)
|
||||
u(605)
|
||||
BIN
.assets/images/AggregatedView.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
.assets/images/ProfilerSamplings.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
.assets/images/SortedSamplings.png
Normal file
|
After Width: | Height: | Size: 57 KiB |
BIN
.assets/images/comptask_feature.png
Normal file
|
After Width: | Height: | Size: 135 KiB |
BIN
.assets/images/flamegraph.png
Normal file
|
After Width: | Height: | Size: 76 KiB |
BIN
.assets/images/flamegraph_colors.png
Normal file
|
After Width: | Height: | Size: 116 KiB |
BIN
.assets/images/flamegraph_diff.png
Normal file
|
After Width: | Height: | Size: 78 KiB |
BIN
.assets/images/heatmap.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
BIN
.assets/images/heatmap1.png
Normal file
|
After Width: | Height: | Size: 73 KiB |
BIN
.assets/images/heatmap2.png
Normal file
|
After Width: | Height: | Size: 96 KiB |
BIN
.assets/images/heatmap3.png
Normal file
|
After Width: | Height: | Size: 134 KiB |
BIN
.assets/images/heatmap4.png
Normal file
|
After Width: | Height: | Size: 198 KiB |
BIN
.assets/images/heatmap5.png
Normal file
|
After Width: | Height: | Size: 236 KiB |
BIN
.assets/images/heatmap6.png
Normal file
|
After Width: | Height: | Size: 106 KiB |
BIN
.assets/images/nativemem_flamegraph.png
Normal file
|
After Width: | Height: | Size: 69 KiB |
BIN
.assets/images/pcaddr_feature.png
Normal file
|
After Width: | Height: | Size: 271 KiB |
BIN
.assets/images/treeview_example.png
Normal file
|
After Width: | Height: | Size: 166 KiB |
BIN
.assets/images/vtable_feature.png
Normal file
|
After Width: | Height: | Size: 19 KiB |
132
.clang-tidy
Normal file
@@ -0,0 +1,132 @@
|
||||
Checks: >
|
||||
-*,
|
||||
bugprone-assert-side-effect,
|
||||
bugprone-bool-pointer-implicit-conversion,
|
||||
bugprone-chained-comparison,
|
||||
bugprone-copy-constructor-init,
|
||||
bugprone-incorrect-roundings,
|
||||
bugprone-infinite-loop,
|
||||
bugprone-integer-division,
|
||||
bugprone-misplaced-operator-in-strlen-in-alloc,
|
||||
bugprone-misplaced-pointer-arithmetic-in-alloc,
|
||||
bugprone-misplaced-widening-cast,
|
||||
bugprone-non-zero-enum-to-bool-conversion,
|
||||
bugprone-pointer-arithmetic-on-polymorphic-object,
|
||||
bugprone-posix-return,
|
||||
bugprone-redundant-branch-condition,
|
||||
bugprone-return-const-ref-from-parameter,
|
||||
bugprone-sizeof-container,
|
||||
bugprone-standalone-empty,
|
||||
bugprone-string-literal-with-embedded-nul,
|
||||
bugprone-string-integer-assignment,
|
||||
bugprone-suspicious-include,
|
||||
bugprone-suspicious-memset-usage,
|
||||
bugprone-suspicious-missing-comma,
|
||||
bugprone-suspicious-realloc-usage,
|
||||
bugprone-suspicious-semicolon,
|
||||
bugprone-suspicious-string-compare,
|
||||
bugprone-swapped-arguments,
|
||||
bugprone-terminating-continue,
|
||||
bugprone-too-small-loop-variable,
|
||||
bugprone-undefined-memory-manipulation,
|
||||
bugprone-undelegated-constructor,
|
||||
bugprone-unhandled-self-assignment,
|
||||
bugprone-unused-raii,
|
||||
bugprone-unused-return-value,
|
||||
bugprone-use-after-move,
|
||||
bugprone-virtual-near-miss,
|
||||
cppcoreguidelines-misleading-capture-default-by-value,
|
||||
cppcoreguidelines-pro-type-const-cast,
|
||||
cppcoreguidelines-slicing,
|
||||
cert-oop58-cpp,
|
||||
cert-flp30-c,
|
||||
misc-confusable-identifiers,
|
||||
misc-definitions-in-headers,
|
||||
misc-header-include-cycle,
|
||||
misc-misplaced-const,
|
||||
misc-non-copyable-objects,
|
||||
misc-redundant-expression,
|
||||
misc-static-assert,
|
||||
misc-unconventional-assign-operator,
|
||||
misc-unused-alias-decls,
|
||||
performance-avoid-endl,
|
||||
performance-faster-string-find,
|
||||
performance-for-range-copy,
|
||||
performance-implicit-conversion-in-loop,
|
||||
performance-inefficient-algorithm,
|
||||
performance-inefficient-string-concatenation,
|
||||
performance-inefficient-vector-operation,
|
||||
performance-move-const-arg,
|
||||
performance-move-constructor-init,
|
||||
performance-no-automatic-move,
|
||||
performance-noexcept-destructor,
|
||||
performance-noexcept-move-constructor,
|
||||
performance-noexcept-swap,
|
||||
performance-trivially-destructible,
|
||||
performance-type-promotion-in-math-fn,
|
||||
performance-unnecessary-copy-initialization,
|
||||
performance-unnecessary-value-param,
|
||||
readability-avoid-return-with-void-value,
|
||||
readability-avoid-unconditional-preprocessor-if,
|
||||
readability-const-return-type,
|
||||
readability-container-contains,
|
||||
readability-container-data-pointer,
|
||||
readability-container-size-empty,
|
||||
readability-delete-null-pointer,
|
||||
readability-duplicate-include,
|
||||
readability-function-size,
|
||||
readability-identifier-naming,
|
||||
readability-misleading-indentation,
|
||||
readability-misplaced-array-index,
|
||||
readability-named-parameter,
|
||||
readability-operators-representation,
|
||||
readability-qualified-auto,
|
||||
readability-redundant-access-specifiers,
|
||||
readability-redundant-casting,
|
||||
readability-redundant-control-flow,
|
||||
readability-redundant-declaration,
|
||||
readability-redundant-function-ptr-dereference,
|
||||
readability-redundant-preprocessor,
|
||||
readability-redundant-string-cstr,
|
||||
readability-redundant-string-init,
|
||||
readability-reference-to-constructed-temporary,
|
||||
readability-simplify-subscript-expr,
|
||||
readability-static-accessed-through-instance,
|
||||
readability-static-definition-in-anonymous-namespace,
|
||||
readability-string-compare,
|
||||
readability-uniqueptr-delete-release,
|
||||
readability-use-anyofallof,
|
||||
# TODO: Consider these
|
||||
# bugprone-switch-missing-default-case
|
||||
# bugprone-multi-level-implicit-pointer-conversion
|
||||
# bugprone-branch-clone
|
||||
# cert-err33-c
|
||||
# cppcoreguidelines-narrowing-conversions
|
||||
# cppcoreguidelines-init-variables
|
||||
# cppcoreguidelines-explicit-virtual-functions
|
||||
# cppcoreguidelines-special-member-functions
|
||||
# llvm-include-order
|
||||
# misc-const-correctness
|
||||
# modernize-*
|
||||
# performance-enum-size
|
||||
# readability-function-cognitive-complexity
|
||||
# readability-else-after-return
|
||||
# readability-convert-member-functions-to-static
|
||||
# readability-math-missing-parentheses
|
||||
# readability-non-const-parameter
|
||||
# readability-redundant-member-init
|
||||
# readability-simplify-boolean-expr
|
||||
# misc-include-cleaner
|
||||
# google-explicit-constructor
|
||||
# cppcoreguidelines-virtual-class-destructor
|
||||
# readability-make-member-function-const
|
||||
HeaderFilterRegex: "*"
|
||||
CheckOptions:
|
||||
- key: readability-identifier-naming.LocalVariableCase
|
||||
value: lower_case
|
||||
- key: readability-identifier-naming.LocalVariableIgnoredRegexp
|
||||
value: '(KB|Thread|setDaemon|klassOop|nVMs|loadLibrary|getTicksFrequency|counterTime|System|M|R|s_)'
|
||||
- key: readability-identifier-naming.PrivateMemberPrefix
|
||||
value: _
|
||||
- key: readability-identifier-naming.ConstexprVariableCase
|
||||
value: UPPER_CASE
|
||||
51
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
name: "🐛 Bug Report"
|
||||
description: Report a bug
|
||||
title: "(short issue description)"
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the bug
|
||||
description: What is the problem? A clear and concise description of the bug.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: behavior
|
||||
attributes:
|
||||
label: Expected vs. actual behavior
|
||||
description: |
|
||||
What did you expect to happen? What happened instead?
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
label: Reproduction Steps
|
||||
description: |
|
||||
Step-by-step instructions how to reproduce the issue. Attach a code sample if available.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: context
|
||||
attributes:
|
||||
label: Additional Information/Context
|
||||
description: |
|
||||
Anything else that might be relevant for troubleshooting this bug: profiles, screenshots, etc.
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: Async-profiler version
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: environment
|
||||
attributes:
|
||||
label: Environment details
|
||||
description: |
|
||||
OS name and version, JDK version, CPU architecture. Is an application running in a container?
|
||||
validations:
|
||||
required: false
|
||||
6
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: 💬 General Question
|
||||
url: https://github.com/async-profiler/async-profiler/discussions
|
||||
about: Please ask and answer questions as a discussion thread
|
||||
14
.github/ISSUE_TEMPLATE/documentation.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
name: "📕 Documentation Issue"
|
||||
description: Report an issue in the profiler documentation
|
||||
title: "(short issue description)"
|
||||
labels: [documentation]
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the issue
|
||||
description: A clear and concise description of the issue.
|
||||
validations:
|
||||
required: true
|
||||
39
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
---
|
||||
name: 🚀 Feature Request
|
||||
description: Suggest an idea for this project
|
||||
title: "(short issue description)"
|
||||
labels: [enhancement]
|
||||
assignees: []
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Describe the feature
|
||||
description: A clear and concise description of the feature you are proposing.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: use-case
|
||||
attributes:
|
||||
label: Use Case
|
||||
description: |
|
||||
Why do you need this feature? For example: "I'm always frustrated when..."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: Proposed Solution
|
||||
description: |
|
||||
Suggest how to implement the addition or change. Provide references to alternative solutions, if any.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: ack
|
||||
attributes:
|
||||
label: Acknowledgements
|
||||
options:
|
||||
- label: I may be able to implement this feature request
|
||||
required: false
|
||||
- label: This feature might incur a breaking change
|
||||
required: false
|
||||
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
### Description
|
||||
|
||||
|
||||
### Related issues
|
||||
|
||||
|
||||
### Motivation and context
|
||||
|
||||
|
||||
### How has this been tested?
|
||||
|
||||
|
||||
---
|
||||
|
||||
By submitting this pull request, I confirm that my contribution is made under the terms of the [Apache 2.0 license].
|
||||
|
||||
[Apache 2.0 license]: https://www.apache.org/licenses/LICENSE-2.0
|
||||
83
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
name: build-template
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
platform:
|
||||
type: string
|
||||
required: true
|
||||
runner:
|
||||
type: string
|
||||
required: true
|
||||
container-image:
|
||||
type: string
|
||||
required: false
|
||||
|
||||
env:
|
||||
build_java_distribution: corretto
|
||||
build_java_version: 11
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.container-image && format('public.ecr.aws/async-profiler/asprof-builder-{0}', inputs.container-image) || '' }}
|
||||
name: "build and unit test (${{ inputs.platform }})"
|
||||
steps:
|
||||
- name: Run container setup
|
||||
if: inputs.container-image != ''
|
||||
run: "[ ! -f /root/setup.sh ] || /root/setup.sh"
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
distribution: ${{ env.build_java_distribution }}
|
||||
java-version: ${{ env.build_java_version }}
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Build and unit test
|
||||
id: build
|
||||
run: |
|
||||
set -x
|
||||
HASH=${GITHUB_SHA:0:7}
|
||||
case "${{ inputs.platform }}" in
|
||||
macos*)
|
||||
brew install gcovr
|
||||
make COMMIT_TAG=$HASH FAT_BINARY=true release coverage -j
|
||||
;;
|
||||
*)
|
||||
make COMMIT_TAG=$HASH CC=/usr/local/musl/bin/musl-gcc release coverage -j
|
||||
echo "debug_archive=$(find . -type f -name "async-profiler-*-debug*" -exec basename {} \;)" >> $GITHUB_OUTPUT
|
||||
;;
|
||||
esac
|
||||
echo "archive=$(find . -type f -name "async-profiler-*" -not -name "*-debug*" -exec basename {} \;)" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Set artifact name
|
||||
id: set_artifact_name
|
||||
run: echo "artifact_name=async-profiler-${{ inputs.platform }}-${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Upload binaries
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ steps.set_artifact_name.outputs.artifact_name }}
|
||||
path: ${{ steps.build.outputs.archive }}
|
||||
if-no-files-found: error
|
||||
- name: Upload debug info
|
||||
uses: actions/upload-artifact@v4
|
||||
if: inputs.platform != 'macos'
|
||||
with:
|
||||
name: ${{ steps.set_artifact_name.outputs.artifact_name }}-debug
|
||||
path: ${{ steps.build.outputs.debug_archive }}
|
||||
if-no-files-found: error
|
||||
- name: Upload coverage report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-coverage-${{ inputs.platform }}
|
||||
path: build/test/coverage/
|
||||
if-no-files-found: error
|
||||
23
.github/workflows/ci.yml
vendored
@@ -1,23 +0,0 @@
|
||||
name: Continuous Integration
|
||||
|
||||
on:
|
||||
- push
|
||||
- pull_request
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-java@v3
|
||||
with:
|
||||
distribution: 'corretto'
|
||||
java-version: '11'
|
||||
- run: sudo sysctl kernel.perf_event_paranoid=1
|
||||
- run: make -j`nproc`
|
||||
- run: make test
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
path: |
|
||||
build/bin/
|
||||
build/lib/
|
||||
48
.github/workflows/clang-tidy-review.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
name: clang-tidy-review
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- code-check
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
clang-tidy-results:
|
||||
if: ${{ github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' }}
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: "public.ecr.aws/async-profiler/asprof-code-check:latest"
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: write
|
||||
actions: read
|
||||
steps:
|
||||
- name: Download code-check artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
run-id: ${{ github.event.workflow_run.id }}
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
name: code-check-artifacts
|
||||
path: /tmp/code-check-artifacts/
|
||||
- name: Read PR information
|
||||
id: pr_info
|
||||
run: |
|
||||
cd /tmp/code-check-artifacts
|
||||
echo "pr_id=$(cat pr-id.txt)" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_head_repo=$(cat pr-head-repo.txt)" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_head_sha=$(cat pr-head-sha.txt)" >> "$GITHUB_OUTPUT"
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ steps.pr_info.outputs.pr_head_repo }}
|
||||
ref: ${{ steps.pr_info.outputs.pr_head_sha }}
|
||||
persist-credentials: false
|
||||
- name: Run clang-tidy-pr-comments action
|
||||
uses: platisd/clang-tidy-pr-comments@v1
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
clang_tidy_fixes: /tmp/code-check-artifacts/clang-tidy-fixes.yml
|
||||
pull_request_id: ${{ steps.pr_info.outputs.pr_id }}
|
||||
python_path: python
|
||||
auto_resolve_conversations: true
|
||||
suggestions_per_comment: 100
|
||||
49
.github/workflows/code-check.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
name: code-check
|
||||
|
||||
on:
|
||||
- pull_request
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
cpp-lint:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: "public.ecr.aws/async-profiler/asprof-code-check:latest"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
- name: Mark repo as safe for Git
|
||||
run: git config --global --add safe.directory $GITHUB_WORKSPACE
|
||||
- name: Fetch base branch
|
||||
run: |
|
||||
git remote add upstream "https://github.com/${{ github.event.pull_request.base.repo.full_name }}"
|
||||
git fetch --no-tags --no-recurse-submodules upstream "${{ github.event.pull_request.base.ref }}"
|
||||
- name: Create artifacts directory
|
||||
run: |
|
||||
mkdir code-check-artifacts/
|
||||
echo "${{ github.event.number }}" > code-check-artifacts/pull-request-id.txt
|
||||
- name: Run clang-tidy
|
||||
run: |
|
||||
set pipefail
|
||||
make cpp-lint-diff \
|
||||
DIFF_BASE="$(git merge-base HEAD "upstream/${{ github.event.pull_request.base.ref }}")" \
|
||||
CLANG_TIDY_ARGS_EXTRA="-export-fixes code-check-artifacts/clang-tidy-fixes.yml"
|
||||
shell: bash
|
||||
- name: Save PR information
|
||||
run: |
|
||||
echo "${{ github.event.number }}" > code-check-artifacts/pr-id.txt
|
||||
echo "${{ github.event.pull_request.head.repo.full_name }}" > code-check-artifacts/pr-head-repo.txt
|
||||
echo "${{ github.event.pull_request.head.sha }}" > code-check-artifacts/pr-head-sha.txt
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: code-check-artifacts
|
||||
path: code-check-artifacts/
|
||||
119
.github/workflows/integ.yml
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
name: integration-test-template
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
test-platform:
|
||||
type: string
|
||||
required: true
|
||||
platform:
|
||||
type: string
|
||||
required: true
|
||||
architecture:
|
||||
type: string
|
||||
required: false
|
||||
java-version:
|
||||
type: string
|
||||
required: true
|
||||
java-distribution:
|
||||
type: string
|
||||
required: false
|
||||
default: "corretto"
|
||||
runner:
|
||||
type: string
|
||||
required: true
|
||||
container-image:
|
||||
type: string
|
||||
required: false
|
||||
container-volumes:
|
||||
type: string
|
||||
required: false
|
||||
use-builtin-jdk:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
retry-count:
|
||||
type: number
|
||||
required: false
|
||||
default: 0
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
integration-test:
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.container-image && format('public.ecr.aws/async-profiler/asprof-builder-{0}', inputs.container-image) || '' }}
|
||||
options: --privileged
|
||||
volumes: ${{ fromJSON(inputs.container-volumes || '[]') }}
|
||||
name: "${{ inputs.test-platform }}, ${{ inputs.java-distribution }} ${{ inputs.java-version }}"
|
||||
steps:
|
||||
- name: Run container setup
|
||||
if: inputs.container-image != ''
|
||||
run: "[ ! -f /root/setup.sh ] || /root/setup.sh"
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@v4
|
||||
# https://github.com/actions/setup-java/issues/678#issuecomment-2446279753
|
||||
if: ${{ !inputs.use-builtin-jdk }}
|
||||
with:
|
||||
distribution: ${{ inputs.java-distribution }}
|
||||
java-version: ${{ inputs.java-version }}
|
||||
architecture: ${{ inputs.architecture }}
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Set variables
|
||||
id: set_variables
|
||||
run: |
|
||||
echo "short_sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
echo "artifact_name=async-profiler-${{ inputs.platform }}-${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
- name: Download async-profiler release artifact
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: ${{ steps.set_variables.outputs.artifact_name }}
|
||||
path: async_profiler_release
|
||||
- name: Download async-profiler JAR artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: async-profiler-jars
|
||||
path: jar_artifacts
|
||||
- name: Extract async-profiler artifact
|
||||
id: extract_artifact
|
||||
run: |
|
||||
release_archive=$(basename $(find async_profiler_release -type f -iname "async-profiler-*" ))
|
||||
case "${{ inputs.runner }}" in
|
||||
macos*)
|
||||
unzip async_profiler_release/$release_archive
|
||||
;;
|
||||
*)
|
||||
tar xvf async_profiler_release/$release_archive
|
||||
;;
|
||||
esac
|
||||
echo "jars_directory=jar_artifacts" >> $GITHUB_OUTPUT
|
||||
echo "release_directory=$(basename $(find . -type d -iname "async-profiler-*" ))" >> $GITHUB_OUTPUT
|
||||
- name: Download Protobuf Java runtime
|
||||
run: |
|
||||
mkdir -p test/deps
|
||||
cd test/deps
|
||||
curl -L -O "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/$PB_JAVA_VERSION/protobuf-java-$PB_JAVA_VERSION.jar"
|
||||
env:
|
||||
PB_JAVA_VERSION: "4.31.1"
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
mkdir -p build/jar
|
||||
cp ${{ steps.extract_artifact.outputs.jars_directory }}/* build/jar
|
||||
make build/test.jar
|
||||
cp -r ${{ steps.extract_artifact.outputs.release_directory }}/bin build
|
||||
cp -r ${{ steps.extract_artifact.outputs.release_directory }}/lib build
|
||||
make test-java TEST_THREADS=2 RETRY_COUNT=${{ inputs.retry-count }} -j
|
||||
- name: Upload integration test logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: integration-test-logs-${{ inputs.test-platform }}-${{ inputs.java-version }}-${{ steps.set_variables.outputs.short_sha }}
|
||||
path: |
|
||||
build/test/logs/
|
||||
hs_err*.log
|
||||
59
.github/workflows/linters.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: lint
|
||||
|
||||
on:
|
||||
- push
|
||||
- pull_request
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
license-header:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Check license headers
|
||||
uses: apache/skywalking-eyes/header@v0.6.0
|
||||
markdown:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install prettier
|
||||
run: |
|
||||
npm install -g prettier@3.4.2
|
||||
make check-md
|
||||
eof-newline:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: EOF newline check
|
||||
env:
|
||||
offenders_path: /tmp/eof_newline_offenders.txt
|
||||
run: |
|
||||
find . -path './.git' -prune -o -exec file --mime-type {} + | grep 'text/' | awk -F: '{print $1}' | while read -r file; do
|
||||
# Read last byte and verify it's a newline
|
||||
if [ -s "$file" ] && [ "$(tail -c1 "$file" | wc -l)" -eq 0 ]; then
|
||||
echo "$file" >> "$offenders_path"
|
||||
fi
|
||||
done
|
||||
if [ -s "$offenders_path" ]; then
|
||||
cat "$offenders_path"
|
||||
exit 1
|
||||
fi
|
||||
trailing-spaces:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Trailing spaces check
|
||||
env:
|
||||
offenders_path: /tmp/trailing_space_offenders.txt
|
||||
run: |
|
||||
grep -rIlE --exclude-dir=.git '[[:blank:]]+$' . > "$offenders_path" || true
|
||||
if [ -s "$offenders_path" ]; then
|
||||
cat "$offenders_path"
|
||||
exit 1
|
||||
fi
|
||||
209
.github/workflows/test-and-publish-nightly.yml
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
name: CI
|
||||
|
||||
on: # We are very liberal in terms of triggering builds. This should be revisited if we start seeing a lot of queueing
|
||||
- push
|
||||
- pull_request
|
||||
- workflow_dispatch
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-jars:
|
||||
runs-on: ubuntu-latest
|
||||
name: build / jars
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Build JARs
|
||||
run: make jar
|
||||
- name: Upload JARs
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: async-profiler-jars
|
||||
path: build/jar/*
|
||||
if-no-files-found: error
|
||||
|
||||
build-linux-arm64:
|
||||
name: build / linux-arm64
|
||||
uses: ./.github/workflows/build.yml
|
||||
with:
|
||||
platform: linux-arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
container-image: "arm:latest"
|
||||
|
||||
build-linux-x64:
|
||||
name: build / linux-x64
|
||||
uses: ./.github/workflows/build.yml
|
||||
with:
|
||||
platform: linux-x64
|
||||
runner: ubuntu-latest
|
||||
container-image: x86:latest
|
||||
|
||||
build-macos:
|
||||
name: build / macos
|
||||
uses: ./.github/workflows/build.yml
|
||||
with:
|
||||
platform: macos
|
||||
runner: macos-15
|
||||
|
||||
integ-linux-x64:
|
||||
name: integ / linux-x64
|
||||
needs: [build-linux-x64, build-jars]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
test-platform: [linux-x64]
|
||||
java-version: [8, 11, 17, 21, 25]
|
||||
java-distribution: [corretto]
|
||||
container-image: [x86:latest]
|
||||
include:
|
||||
- test-platform: linux-x64-alpine
|
||||
container-image: alpine:corretto-11
|
||||
use-builtin-jdk: true
|
||||
java-distribution: corretto
|
||||
java-version: 11
|
||||
- test-platform: linux-x64-AL2
|
||||
container-image: amazonlinux:2
|
||||
# GHA provides Node.js by attaching a volume to the container. The container path is
|
||||
# '/__e/node20', and it's not writable unless we override it via 'container.volumes'.
|
||||
container-volumes: '["/tmp/node20:/__e/node20"]'
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
- test-platform: linux-x64-AL2023
|
||||
container-image: amazonlinux:2023
|
||||
java-version: 11
|
||||
java-distribution: corretto
|
||||
- test-platform: linux-x64-alpaquita
|
||||
container-image: alpaquita:x86_64-liberica-21
|
||||
use-builtin-jdk: true
|
||||
java-distribution: liberica
|
||||
java-version: 21
|
||||
uses: ./.github/workflows/integ.yml
|
||||
with:
|
||||
platform: linux-x64
|
||||
test-platform: ${{ matrix.test-platform }}
|
||||
runner: ubuntu-latest
|
||||
container-image: ${{ matrix.container-image }}
|
||||
container-volumes: ${{ matrix.container-volumes || '' }}
|
||||
java-version: ${{ matrix.java-version }}
|
||||
java-distribution: ${{ matrix.java-distribution }}
|
||||
use-builtin-jdk: ${{ matrix.use-builtin-jdk || false }}
|
||||
|
||||
integ-linux-arm64:
|
||||
name: integ / linux-arm64
|
||||
needs: [build-linux-arm64, build-jars]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
test-platform: [linux-arm64]
|
||||
java-version: [8, 11, 17, 21, 25]
|
||||
java-distribution: [corretto]
|
||||
container-image: [arm:latest]
|
||||
uses: ./.github/workflows/integ.yml
|
||||
with:
|
||||
platform: linux-arm64
|
||||
test-platform: ${{ matrix.test-platform }}
|
||||
runner: ubuntu-24.04-arm
|
||||
container-image: ${{ matrix.container-image }}
|
||||
container-volumes: ${{ matrix.container-volumes || '' }}
|
||||
java-version: ${{ matrix.java-version }}
|
||||
java-distribution: ${{ matrix.java-distribution }}
|
||||
|
||||
integ-macos:
|
||||
name: integ / macos
|
||||
needs: [build-macos, build-jars]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- runner: macos-15
|
||||
test-platform: macos-arm64
|
||||
java-version: "11"
|
||||
- runner: macos-15
|
||||
test-platform: macos-arm64
|
||||
java-version: "21"
|
||||
- runner: macos-15-intel
|
||||
test-platform: macos-x64
|
||||
java-version: "17"
|
||||
architecture: x64
|
||||
retry-count: 1
|
||||
uses: ./.github/workflows/integ.yml
|
||||
with:
|
||||
platform: macos
|
||||
test-platform: ${{ matrix.test-platform }}
|
||||
runner: ${{ matrix.runner }}
|
||||
java-version: ${{ matrix.java-version }}
|
||||
architecture: ${{ matrix.architecture || '' }}
|
||||
retry-count: ${{ matrix.retry-count || 0 }}
|
||||
|
||||
publish-only-on-push:
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
permissions:
|
||||
contents: write
|
||||
name: publish (nightly)
|
||||
runs-on: ubuntu-latest
|
||||
needs: [integ-linux-x64, integ-linux-arm64, integ-macos]
|
||||
steps:
|
||||
- name: Download async-profiler binaries and jars
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: 'async-profiler-*'
|
||||
merge-multiple: 'true'
|
||||
- name: Delete previous release and publish new release
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const fs = require('fs').promises;
|
||||
const commonOptions = {
|
||||
owner: "async-profiler",
|
||||
repo: "async-profiler",
|
||||
};
|
||||
let previousRelease = undefined;
|
||||
try {
|
||||
previousRelease = await github.rest.repos.getReleaseByTag({
|
||||
...commonOptions,
|
||||
tag: "nightly",
|
||||
});
|
||||
} catch (e) {
|
||||
console.log("No previous nightly release");
|
||||
// ignore, there was no previous nightly release
|
||||
}
|
||||
if (previousRelease !== undefined) {
|
||||
// delete previous release and nightly tag
|
||||
await github.rest.repos.deleteRelease({
|
||||
...commonOptions,
|
||||
release_id: previousRelease.data.id,
|
||||
});
|
||||
await github.rest.git.deleteRef({...commonOptions, ref: "tags/nightly"});
|
||||
}
|
||||
// create draft release
|
||||
const newReleaseId = (await github.rest.repos.createRelease({
|
||||
...commonOptions,
|
||||
tag_name: "nightly",
|
||||
target_commitish: "${{ github.sha }}",
|
||||
name: "Nightly builds",
|
||||
body: "Async-profiler binaries published automatically from the latest sources in `master` upon a successful build.",
|
||||
prerelease: true,
|
||||
draft: true,
|
||||
})).data.id;
|
||||
// upload binaries and jars to draft release
|
||||
for (const archiveName of await fs.readdir(process.cwd())) {
|
||||
await github.rest.repos.uploadReleaseAsset({
|
||||
...commonOptions,
|
||||
release_id: newReleaseId,
|
||||
name: archiveName,
|
||||
data: await fs.readFile(archiveName),
|
||||
});
|
||||
}
|
||||
// publish release
|
||||
await github.rest.repos.updateRelease({
|
||||
...commonOptions,
|
||||
release_id: newReleaseId,
|
||||
draft: false,
|
||||
});
|
||||
2
.gitignore
vendored
@@ -6,3 +6,5 @@
|
||||
/test/*.class
|
||||
.vscode
|
||||
*.iml
|
||||
/src/api/**/*.class
|
||||
.gdb_history
|
||||
|
||||
24
.licenserc.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
header:
|
||||
- paths:
|
||||
- 'src/jattach'
|
||||
license:
|
||||
content: |
|
||||
Copyright The jattach authors
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
comment: on-failure
|
||||
|
||||
- paths:
|
||||
- 'src'
|
||||
- 'test'
|
||||
paths-ignore:
|
||||
- 'src/jattach'
|
||||
- 'src/res'
|
||||
- '**/MANIFEST.MF'
|
||||
- 'test/**/*.collapsed'
|
||||
license:
|
||||
content: |
|
||||
Copyright The async-profiler authors
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
comment: on-failure
|
||||
261
CHANGELOG.md
@@ -1,5 +1,258 @@
|
||||
# Changelog
|
||||
|
||||
## [4.4]
|
||||
|
||||
### Features
|
||||
|
||||
- #1553: Differential Flame Graphs
|
||||
|
||||
### Improvements
|
||||
|
||||
- #1705: `memlimit` option to limit size of the call trace storage
|
||||
- #1706: Extend syntax of `-j` option to truncate deep stacks
|
||||
- #1720: FlameGraph: Dark mode toggle
|
||||
- #1672: FlameGraph: Use Ctrl+Click in addition to Alt+Click to remove stacks
|
||||
- #1684: Unwind ARM64 generated stubs on JDK 26+
|
||||
- #1676: Make `dwarf` stack walking mode an alias for `vm`
|
||||
- #1671: An option to select TLAB based AllocTracer engine with JDK 11+
|
||||
- #1670: Move converter Main class to the one.convert package
|
||||
- #1660: Provide non-aggregated samples in OTLP converter
|
||||
- #1701, #1682: Speed-up stack walking
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- #1673: Permanently remove `check` command
|
||||
- #1675: Remove unsafe AsyncGetCallTrace recovery tricks along with `safemode` option
|
||||
- #1677: Remove `cstack=lbr` option
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- #1727: Allocation profile has wrong units in OTLP format
|
||||
- #1716: Wall-clock Heatmap does not count samples correctly
|
||||
- #1715: Fix Zing crash when profiling cpu+wall together
|
||||
- #1708: Another fix for correct vDSO unwinding on ARM64
|
||||
- #1707: Workaround for JFR shutdown race
|
||||
- #1699: Allow negative keys in JFR constant pool
|
||||
- #1697: Ensure remaining buffer is sufficient for event data in JfrReader
|
||||
- #1657: Re-enable workaround for a long attach on JDK 8
|
||||
- #1654: Prefer perf-events engine when record-cpu or target-cpu are selected
|
||||
- #1585: Scale perf counters in case of multiplexing
|
||||
- #1528: Add a hard-coded limit on the maximum number of jmethodIDs
|
||||
- #1203: Fix "Instance field not found" when using `-Xcheck:jni` on JDK 8
|
||||
- Do not walk past virtual thread continuation barriers
|
||||
|
||||
## [4.3] - 2026-01-20
|
||||
|
||||
### Features
|
||||
|
||||
- #1547: Native lock profiling
|
||||
- #1566: Filter cpu/wall profiles by latency
|
||||
- #1568: Expose async-profiler metrics in Prometheus format
|
||||
- #1628: async-profiler.jar as Java agent; remote control via JMX
|
||||
|
||||
### Improvements
|
||||
|
||||
- #1140: FlameGraph improvements: legend, hot keys, new toolbar icons
|
||||
- #1530: Timezone switcher between Local and UTC time in Heatmaps
|
||||
- #1582: Support `--include`/`--exclude` options for JFR to Heatmap/OTLP/pprof conversion
|
||||
- #1624: Compatibility with OTLP v1.9.0
|
||||
- #1629: Harden crash protection in StackWalker
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- #1277: New `timeSpan` field in WallClockSample events
|
||||
- #1518: Deprecate `check` command
|
||||
- #1590: Support compilation on modern JDKs. Drop JDK 7 support
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- #1599: Workaround for the kernel PERF_EVENT_IOC_REFRESH bug
|
||||
- #1596: Do not block any signals during execution of a custom crash handler
|
||||
- #1584: JfrReader loops on corrupted recordings
|
||||
- #1555: Parse FlameGraph title from HTML input
|
||||
- #1621: `loop` and `timeout` options do not work together
|
||||
- #1641: Unwind vDSO correctly on Linux-ARM64
|
||||
- #1648: Fix stop sequence in Profiler::start
|
||||
- #1575: Fix CodeCache memory leak in lock profiling while looping
|
||||
- #1558: Fix record-cpu bug when kernel stacks are not available
|
||||
- #1651: Do not record CPU frame for non-perf samples
|
||||
- #1614, #1615, #1617, #1623: Fix races related to VM termination
|
||||
|
||||
## [4.2.1] - 2025-11-22
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- #1599: Workaround for the kernel PERF_EVENT_IOC_REFRESH bug
|
||||
- #1596: Do not block any signals during execution of a custom crash handler
|
||||
|
||||
## [4.2] - 2025-10-20
|
||||
|
||||
### Features
|
||||
|
||||
- Java Method Tracing and Latency Profiling
|
||||
* #1421: Latency profiling
|
||||
* #1435: Allow wildcards in Instrument profiling engine
|
||||
* #1499: `--trace` option with per-method latency threshold
|
||||
- System-wide process sampling on Linux
|
||||
* #1411: `--proc` option to record `profiler.ProcessSample` events
|
||||
- VMStructs stack walker by default
|
||||
* #1539: Use VMStructs stack walking mode by default
|
||||
* #1537: Support `comptask` and `vtable` features
|
||||
* #1517: Use JavaFrameAnchor to find top Java frame
|
||||
* #1449: Special handling of prologue and epilogue of compiled methods
|
||||
|
||||
### Improvements
|
||||
|
||||
- #1475: Add `CPUTimeSample` event support to jfrconv
|
||||
- #1414: Per-thread flamegraph option in JFR heatmap converter
|
||||
- #1526: Expose JfrReader dictionary that maps osThreadId to javaThreadId
|
||||
- #1448: Thread name in OpenTelemetry output
|
||||
- #1413: Add `time_nanos` and `duration_nanos` to OTLP profiles
|
||||
- #1450: Unwind dylib stubs as empty frames on macOS
|
||||
- #1416: Add synthetic symbols for Mach-O stubs/trampolines
|
||||
- Allow cross-compilation for 32-bit platforms
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- #1515: Fix UnsatisfiedLinkError when tmpdir is set to a relative path
|
||||
- #1500: Detect if `calloc` calls `malloc` for nativemem profiling
|
||||
- #1427: Re-implement SafeAccess crash protection
|
||||
- #1417: Two wall-clock profilers interfere with each other
|
||||
|
||||
### Project Infrastructure
|
||||
|
||||
- #1527: GHA: replace macos-13 with macos-15-intel
|
||||
- #1510: Add option to retry tests
|
||||
- #1508: Add more GHA jobs to cover JDK versions on ARM
|
||||
- #1502: Fix job dependencies between integration tests and builds
|
||||
- #1466: Add Liberica JDK on Alpaquita Linux to the CI
|
||||
- Made integration tests more stable overall
|
||||
|
||||
## [4.1] - 2025-07-21
|
||||
|
||||
### Features
|
||||
- Experimental support for the OpenTelemetry profiling signal
|
||||
* #1188: OTLP output format and `dumpOtlp` Java API
|
||||
* #1336: JFR to OTLP converter
|
||||
- JDK 25 support
|
||||
* #1222: Update VMStructs for JDK 25
|
||||
- Productize native memory profiling
|
||||
* #1193: Full `nativemem` support on macOS
|
||||
* #1254: Fixed Nativemem tests on Alpine
|
||||
* #1269: Native memory profiling now works with `jemalloc`
|
||||
* #1323: `nativemem` shows allocations inside async-profiler itself
|
||||
|
||||
### Improvements
|
||||
- #1174: Detect JVM in non-Java application and attach to it
|
||||
- #1223: Native API to add custom events in JFR recording
|
||||
- #1259: `--all` option to collect all possible events simultaneously
|
||||
- #1286: Record which CPU a sample was taken on
|
||||
- #1299: Skip last 10% allocations for leak detection
|
||||
- #1300: Allow profiling kprobes/uprobes with `--fdtransfer`
|
||||
- #1366: Rewrite `jfrconv` executable to shell
|
||||
- #1400: Unwind checksum and digest intrinsics on ARM64
|
||||
- #1357, #1389: VMStructs-based stack unwinding for `alloc` and `nativemem` profiling
|
||||
|
||||
### Bug fixes
|
||||
- #1251: `--ttsp` option does not work on Alpine
|
||||
- #1264: Guard hook installation with dlopen/dlclose
|
||||
- #1319: SIGSEGV in PerfEvents::walk
|
||||
- #1350: Disable JFR OldObjectSample event in jfrsync mode
|
||||
- #1358: Do not dereference jmethodIDs on JDK 26
|
||||
- #1374: Correctly check if profiler is preloaded
|
||||
- #1380: Workaround clang type promotion bug
|
||||
- #1387: JFR writer crashes when using cstack=vmx
|
||||
- #1393: Improve stack walking termination logic: no endless `unknown` frames
|
||||
- Stack unwinding fixes for ARM64
|
||||
|
||||
### Project Infrastructure
|
||||
- #1129: Command-line option to filter tests
|
||||
- #1262: Include `asprof.h` in async-profiler release package
|
||||
- #1271: Release additional binaries with debug symbols
|
||||
- #1274: Add Corretto 8 to the test matrix
|
||||
- #1246, #1226: Run tests on Amazon Linux and Alpine Linux
|
||||
- #1360: Auto-generated clang-tidy review comments
|
||||
- #1373: Save all generated test logs for debug purposes
|
||||
- Fixed flaky tests (#1282, #1307, #1376)
|
||||
|
||||
## [4.0] - 2025-04-08
|
||||
|
||||
### Features
|
||||
- #895, #905: `jfrconv` binary and numerous converter enhancements
|
||||
- #944: Interactive Heatmap
|
||||
- #1064: Native memory leak profiler
|
||||
- #1002: An option to display instruction addresses
|
||||
- #1007: Optimize wall clock profiling
|
||||
- #1073: Productize VMStructs-based stack walker: `--cstack vm/vmx`
|
||||
- #1169: C API for accessing thread-local profiling context
|
||||
|
||||
### Improvements
|
||||
- #923: Support JDK 23+
|
||||
- #952: Solve musl and glibc compatibility issues; link `libstdc++` statically
|
||||
- #955: `--libpath` option to specify path to `libasyncProfiler.so` in a container
|
||||
- #1018: `--grain` converter option to coarsen flame graphs
|
||||
- #1046: `--nostop` option to continue profiling outside `--begin`/`--end` window
|
||||
- #1178: `--inverted` option to flip flame graphs vertically
|
||||
- #1009: Allows collecting allocation and live object traces at the same time
|
||||
- #925: An option to accumulate JFR events in memory instead of flushing to a file
|
||||
- #929: Load symbols from debuginfod cache
|
||||
- #982: Sample contended locks by overflowing interval bucket
|
||||
- #993: Filter native frames in allocation profile
|
||||
- #896: FlameGraph: `Alt+Click` to remove stacks
|
||||
- #1097: FlameGraph: `N`/`Shift+N` to navigate through search results
|
||||
- #1182: Retain by-thread grouping when reversing FlameGraph
|
||||
- #1167: Log when no samples are collected
|
||||
- #1044: Fall back to `ctimer` for CPU profiling when perf_events are unavailable
|
||||
- #1068: Count missed samples when estimating total CPU time in `ctimer` mode
|
||||
- #1142: Use counter-timer register for timestamps on ARM64
|
||||
- #1123: Support `clock=tsc` without a JVM
|
||||
- #1070: Demangle Rust v0 symbols
|
||||
- #1007: Use `ExecutionSample` event for CPU profiling and `WallClockSample` for Wall clock profiling
|
||||
- #1011: Obtain `can_generate_sampled_object_alloc_events` JVMTI capability only when needed
|
||||
- #1013: Intercept java.util.concurrent locks more efficiently
|
||||
- #759: Discover available profiling signal automatically
|
||||
- #884: Record event timestamps early
|
||||
- #885: Print error message if JVM fails to load libasyncProfiler
|
||||
- #892: Resolve tracepoint id in `asprof`
|
||||
- Suppress dynamic attach warning on JDK 21+
|
||||
|
||||
### Bug fixes
|
||||
- #1143: Crash on macOS when using thread filter
|
||||
- #1125: Fixed parsing concurrently loaded libraries
|
||||
- #1095: jfr print fails when a recording has empty pools
|
||||
- #1084: Fixed Logging related races
|
||||
- #1074: Parse both .rela.dyn and .rela.plt sections
|
||||
- #1003: Support both tracefs and debugfs for kernel tracepoints
|
||||
- #986: Profiling output respects loglevel
|
||||
- #981: Avoid JVM crash by deleting JNI refs after `GetMethodDeclaringClass`
|
||||
- #934: Fix crash on Zing in a native thread
|
||||
- #843: Fix race between parsing and concurrent unloading of shared libraries
|
||||
- #1147, #1151: Deadlocks with jemalloc and tcmalloc profilers
|
||||
- Stack walking fixes for ARM64
|
||||
- Converter fixes for `jfrsync` profiles
|
||||
- Fixed parsing non-PIC executables and shared objects with non-standard section layout
|
||||
- Fixed recursion in `pthread_create` when using native profiling API
|
||||
- Fixed crashes on Alpine when profiling native apps
|
||||
- Fixed warnings with `-Xcheck:jni`
|
||||
- Fixed "Unsupported JVM" on OpenJ9 JDK 21
|
||||
- Fixed DefineClass crash on OpenJ9
|
||||
- JfrReader should handle custom events properly
|
||||
- Handle truncated JFRs
|
||||
|
||||
### Project Infrastructure
|
||||
- Restructure and update documentation
|
||||
- Implement test framework; add new integration tests
|
||||
- Unit test framework for C++ code
|
||||
- Run CI on all supported platforms
|
||||
- Test multiple JDK versions in CI
|
||||
- Add GHA to validate license headers
|
||||
- Add Markdown checker and formatter
|
||||
- Add Issue and Pull Request templates
|
||||
- Add Contributing Guidelines and Code of Conduct
|
||||
- Run static analyzer and fix found issues (#1034, #1039, #1049, #1051, #1098)
|
||||
- Provide Dockerfile for building async-profiler release packages
|
||||
- Publish nightly builds automatically
|
||||
|
||||
## [3.0] - 2024-01-20
|
||||
|
||||
### Features
|
||||
@@ -253,7 +506,7 @@
|
||||
### Features
|
||||
- Converters between different output formats:
|
||||
- JFR -> nflx (FlameScope)
|
||||
- Collapsed stacks -> HTML 5 Flame Graph
|
||||
- Collapsed stacks -> HTML 5 Flame Graph
|
||||
|
||||
### Improvements
|
||||
- `profiler.sh` no longer requires bash (contributed by @cfstras)
|
||||
@@ -337,7 +590,7 @@
|
||||
### Features
|
||||
- Interactive Call tree and Backtrace tree in HTML format (contributed by @rpulle)
|
||||
- Experimental support for Java Flight Recorder (JFR) compatible output
|
||||
|
||||
|
||||
### Improvements
|
||||
- Added units: `ms`, `us`, `s` and multipliers: `K`, `M`, `G` for interval argument
|
||||
- API and command-line option `-v` for profiler version
|
||||
@@ -351,7 +604,7 @@
|
||||
|
||||
### Features
|
||||
- Profiling of native functions, e.g. malloc
|
||||
|
||||
|
||||
### Improvements
|
||||
- JDK 9, 10, 11 support for heap profiling with accurate stack traces
|
||||
- `root` can now profile Java processes of any user
|
||||
@@ -363,7 +616,7 @@
|
||||
- Produce SVG files out of the box; flamegraph.pl is no longer needed
|
||||
- Profile ReentrantLock contention
|
||||
- Java API
|
||||
|
||||
|
||||
### Improvements
|
||||
- Allocation and Lock profiler now works on JDK 7, too
|
||||
- Faster dumping of results
|
||||
|
||||
4
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,4 @@
|
||||
## Code of Conduct
|
||||
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
|
||||
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
|
||||
opensource-codeofconduct@amazon.com with any additional questions or comments.
|
||||
59
CONTRIBUTING.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
|
||||
documentation, we greatly value feedback and contributions from our community.
|
||||
|
||||
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
|
||||
information to effectively respond to your bug report or contribution.
|
||||
|
||||
|
||||
## Security issue notifications
|
||||
If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
|
||||
|
||||
|
||||
## Reporting Bugs/Feature Requests
|
||||
|
||||
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
|
||||
|
||||
When filing an issue, please check [existing open](https://github.com/async-profiler/async-profiler/issues), or [recently closed](https://github.com/async-profiler/async-profiler/issues?q=is%3Aissue+is%3Aclosed), issues to make sure somebody else hasn't already
|
||||
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
|
||||
|
||||
* A reproducible test case or series of steps
|
||||
* The version of our code being used
|
||||
* Any modifications you've made relevant to the bug
|
||||
* Anything unusual about your environment or deployment
|
||||
|
||||
|
||||
## Contributing via Pull Requests
|
||||
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
|
||||
|
||||
1. You are working against the latest source on the *master* branch.
|
||||
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
|
||||
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
|
||||
|
||||
To send us a pull request, please:
|
||||
|
||||
1. Fork the repository.
|
||||
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
|
||||
3. Ensure local tests pass.
|
||||
4. Commit to your fork using clear commit messages.
|
||||
5. Send us a pull request, answering any default questions in the pull request interface.
|
||||
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
|
||||
|
||||
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
|
||||
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
|
||||
## Finding contributions to work on
|
||||
Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/async-profiler/async-profiler/labels/help%20wanted) issues is a great place to start.
|
||||
|
||||
|
||||
## Code of Conduct
|
||||
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
|
||||
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
|
||||
opensource-codeofconduct@amazon.com with any additional questions or comments.
|
||||
|
||||
|
||||
## Licensing
|
||||
|
||||
See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
|
||||
289
Makefile
@@ -1,34 +1,88 @@
|
||||
PROFILER_VERSION=3.0
|
||||
PROFILER_VERSION ?= 4.4
|
||||
|
||||
ifeq ($(COMMIT_TAG),true)
|
||||
PROFILER_VERSION := $(PROFILER_VERSION)-$(shell git rev-parse --short=8 HEAD)
|
||||
else ifneq ($(COMMIT_TAG),)
|
||||
PROFILER_VERSION := $(PROFILER_VERSION)-$(COMMIT_TAG)
|
||||
endif
|
||||
|
||||
TMP_DIR=/tmp
|
||||
COMMA=,
|
||||
PACKAGE_NAME=async-profiler-$(PROFILER_VERSION)-$(OS_TAG)-$(ARCH_TAG)
|
||||
PACKAGE_DIR=/tmp/$(PACKAGE_NAME)
|
||||
PACKAGE_DIR=$(TMP_DIR)/$(PACKAGE_NAME)
|
||||
DEBUG_PACKAGE_NAME=$(PACKAGE_NAME)-debug
|
||||
DEBUG_PACKAGE_DIR=$(PACKAGE_DIR)-debug
|
||||
|
||||
ASPROF=bin/asprof
|
||||
JFRCONV=bin/jfrconv
|
||||
LIB_PROFILER=lib/libasyncProfiler.$(SOEXT)
|
||||
LIB_PROFILER_DEBUG=libasyncProfiler.$(SOEXT).debug
|
||||
ASPROF_HEADER=include/asprof.h
|
||||
API_JAR=jar/async-profiler.jar
|
||||
CONVERTER_JAR=jar/jfr-converter.jar
|
||||
TEST_JAR=test.jar
|
||||
|
||||
CFLAGS=-O3 -fno-exceptions
|
||||
CXXFLAGS=-O3 -fno-exceptions -fno-omit-frame-pointer -fvisibility=hidden
|
||||
CC ?= gcc
|
||||
CXX ?= g++
|
||||
STRIP ?= strip
|
||||
OBJCOPY ?= objcopy
|
||||
|
||||
ifneq ($(CROSS_COMPILE),)
|
||||
CC := $(CROSS_COMPILE)gcc
|
||||
CXX := $(CROSS_COMPILE)g++
|
||||
AS := $(CROSS_COMPILE)as
|
||||
LD := $(CROSS_COMPILE)ld
|
||||
STRIP := $(CROSS_COMPILE)strip
|
||||
OBJCOPY := $(CROSS_COMPILE)objcopy
|
||||
endif
|
||||
|
||||
CFLAGS_EXTRA ?=
|
||||
CXXFLAGS_EXTRA ?=
|
||||
CFLAGS=-O3 -fno-exceptions $(CFLAGS_EXTRA)
|
||||
CXXFLAGS=-O3 -fno-exceptions -fno-omit-frame-pointer -fvisibility=hidden -std=c++11 $(CXXFLAGS_EXTRA)
|
||||
CPPFLAGS=
|
||||
DEFS=-DPROFILER_VERSION=\"$(PROFILER_VERSION)\"
|
||||
INCLUDES=-I$(JAVA_HOME)/include -Isrc/helper
|
||||
LIBS=-ldl -lpthread
|
||||
MERGE=true
|
||||
GCOV ?= gcov
|
||||
|
||||
JAVAC=$(JAVA_HOME)/bin/javac
|
||||
JAR=$(JAVA_HOME)/bin/jar
|
||||
JAVA=$(JAVA_HOME)/bin/java
|
||||
JAVA_TARGET=8
|
||||
JAVAC_OPTIONS=--release $(JAVA_TARGET) -Xlint:-options
|
||||
TEST_JAVA ?= $(JAVA_HOME)/bin/java
|
||||
|
||||
SOURCES := $(wildcard src/*.cpp)
|
||||
TEST_LIB_DIR=build/test/lib
|
||||
TEST_BIN_DIR=build/test/bin
|
||||
TEST_DEPS_DIR=test/deps
|
||||
TEST_GEN_DIR=test/gen
|
||||
LOG_DIR=build/test/logs
|
||||
LOG_LEVEL=
|
||||
SKIP=
|
||||
RETRY_COUNT=0
|
||||
TEST_THREADS ?= 8
|
||||
TEST_FLAGS=-DlogDir=$(LOG_DIR) -DlogLevel=$(LOG_LEVEL) -Dskip='$(subst $(COMMA), ,$(SKIP))' -DretryCount=$(RETRY_COUNT) -DthreadCount=$(TEST_THREADS)
|
||||
|
||||
# always sort SOURCES so zInit is last.
|
||||
SOURCES := $(sort $(wildcard src/*.cpp))
|
||||
HEADERS := $(wildcard src/*.h)
|
||||
RESOURCES := $(wildcard src/res/*)
|
||||
JAVA_HELPER_CLASSES := $(wildcard src/helper/one/profiler/*.class)
|
||||
API_SOURCES := $(wildcard src/api/one/profiler/*.java)
|
||||
JAR_MANIFEST := src/api/one/profiler/MANIFEST.MF
|
||||
CONVERTER_SOURCES := $(shell find src/converter -name '*.java')
|
||||
TEST_SOURCES := $(shell find test -name '*.java' ! -path 'test/stubs/*')
|
||||
TESTS ?=
|
||||
CPP_TEST_SOURCES := test/native/testRunner.cpp $(shell find test/native -name '*Test.cpp')
|
||||
CPP_TEST_HEADER := test/native/testRunner.hpp
|
||||
CPP_TEST_INCLUDES := -Isrc -Itest/native
|
||||
TEST_LIB_SOURCES := $(wildcard test/native/libs/*)
|
||||
TEST_BIN_SOURCES := $(shell find test/test -name "*.c*")
|
||||
|
||||
ifeq ($(JAVA_HOME),)
|
||||
export JAVA_HOME:=$(shell java -cp . JavaHome)
|
||||
JAVA_HOME:=$(shell java -cp . JavaHome)
|
||||
endif
|
||||
|
||||
OS:=$(shell uname -s)
|
||||
@@ -39,15 +93,15 @@ ifeq ($(OS),Darwin)
|
||||
PACKAGE_EXT=zip
|
||||
OS_TAG=macos
|
||||
ifeq ($(FAT_BINARY),true)
|
||||
FAT_BINARY_FLAGS=-arch x86_64 -arch arm64 -mmacos-version-min=10.12
|
||||
FAT_BINARY_FLAGS=-arch x86_64 -arch arm64 -mmacos-version-min=10.15
|
||||
CFLAGS += $(FAT_BINARY_FLAGS)
|
||||
CXXFLAGS += $(FAT_BINARY_FLAGS)
|
||||
PACKAGE_NAME=async-profiler-$(PROFILER_VERSION)-$(OS_TAG)
|
||||
MERGE=false
|
||||
SKIP_IN_RELEASE=true
|
||||
endif
|
||||
else
|
||||
CXXFLAGS += -Wl,-z,defs
|
||||
CXXFLAGS += -U_FORTIFY_SOURCE -Wl,-z,defs -Wl,--exclude-libs,ALL -static-libstdc++ -static-libgcc
|
||||
CXXFLAGS += -fdata-sections -ffunction-sections -Wl,--gc-sections -ggdb -Wunused-variable -Wno-psabi
|
||||
ifeq ($(MERGE),true)
|
||||
CXXFLAGS += -fwhole-program
|
||||
endif
|
||||
@@ -55,25 +109,19 @@ else
|
||||
INCLUDES += -I$(JAVA_HOME)/include/linux
|
||||
SOEXT=so
|
||||
PACKAGE_EXT=tar.gz
|
||||
ifeq ($(findstring musl,$(shell ldd /bin/ls)),musl)
|
||||
OS_TAG=linux-musl
|
||||
else
|
||||
OS_TAG=linux
|
||||
endif
|
||||
OS_TAG=linux
|
||||
endif
|
||||
|
||||
ARCH:=$(shell uname -m)
|
||||
ifeq ($(ARCH),x86_64)
|
||||
ARCH_TAG=x64
|
||||
else
|
||||
ifeq ($(findstring arm,$(ARCH)),arm)
|
||||
ifeq ($(findstring 64,$(ARCH)),64)
|
||||
ARCH_TAG=arm64
|
||||
else
|
||||
ARCH_TAG=arm32
|
||||
endif
|
||||
else ifeq ($(findstring aarch64,$(ARCH)),aarch64)
|
||||
ifeq ($(ARCH_TAG),)
|
||||
ARCH:=$(shell uname -m)
|
||||
ifeq ($(ARCH),x86_64)
|
||||
ARCH_TAG=x64
|
||||
else ifeq ($(ARCH),aarch64)
|
||||
ARCH_TAG=arm64
|
||||
else ifeq ($(ARCH),arm64)
|
||||
ARCH_TAG=arm64
|
||||
else ifeq ($(findstring arm,$(ARCH)),arm)
|
||||
ARCH_TAG=arm32
|
||||
else ifeq ($(ARCH),ppc64le)
|
||||
ARCH_TAG=ppc64le
|
||||
else ifeq ($(ARCH),riscv64)
|
||||
@@ -85,83 +133,206 @@ else
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq (,$(findstring $(ARCH_TAG),x86 x64 arm64))
|
||||
CXXFLAGS += -momit-leaf-frame-pointer
|
||||
STATIC_BINARY=$(findstring musl-gcc,$(CC))
|
||||
ifneq (,$(STATIC_BINARY))
|
||||
CFLAGS += -static -fdata-sections -ffunction-sections -Wl,--gc-sections
|
||||
endif
|
||||
|
||||
.PHONY: all jar release build-test test clean coverage clean-coverage build-test-java build-test-cpp test-cpp test-java check-md format-md
|
||||
|
||||
.PHONY: all jar release test native clean
|
||||
|
||||
all: build/bin build/lib build/$(LIB_PROFILER) build/$(ASPROF) jar build/$(JFRCONV)
|
||||
all: build/bin build/lib build/$(LIB_PROFILER) build/$(ASPROF) jar build/$(JFRCONV) build/$(ASPROF_HEADER)
|
||||
|
||||
jar: build/jar build/$(API_JAR) build/$(CONVERTER_JAR)
|
||||
|
||||
release: $(PACKAGE_NAME).$(PACKAGE_EXT)
|
||||
|
||||
$(PACKAGE_NAME).tar.gz: $(PACKAGE_DIR)
|
||||
patchelf --remove-needed ld-linux-x86-64.so.2 --remove-needed ld-linux-aarch64.so.1 $(PACKAGE_DIR)/$(LIB_PROFILER)
|
||||
tar czf $@ -C $(PACKAGE_DIR)/.. $(PACKAGE_NAME)
|
||||
rm -r $(PACKAGE_DIR)
|
||||
|
||||
tar czf $(DEBUG_PACKAGE_NAME).tar.gz -C $(DEBUG_PACKAGE_DIR)/.. $(DEBUG_PACKAGE_NAME)
|
||||
rm -r $(DEBUG_PACKAGE_DIR)
|
||||
|
||||
$(PACKAGE_NAME).zip: $(PACKAGE_DIR)
|
||||
ifneq ($(GITHUB_ACTIONS), true)
|
||||
codesign -s "Developer ID" -o runtime --timestamp -v $(PACKAGE_DIR)/$(ASPROF) $(PACKAGE_DIR)/$(JFRCONV) $(PACKAGE_DIR)/$(LIB_PROFILER)
|
||||
cat build/$(CONVERTER_JAR) >> $(PACKAGE_DIR)/$(JFRCONV)
|
||||
endif
|
||||
ditto -c -k --keepParent $(PACKAGE_DIR) $@
|
||||
rm -r $(PACKAGE_DIR)
|
||||
|
||||
$(PACKAGE_DIR): all LICENSE *.md
|
||||
mkdir -p $(PACKAGE_DIR)
|
||||
cp -RP build/bin build/lib LICENSE *.md $(PACKAGE_DIR)/
|
||||
$(PACKAGE_DIR): all LICENSE README.md
|
||||
rm -rf $@
|
||||
mkdir -p $(PACKAGE_DIR) $(DEBUG_PACKAGE_DIR)
|
||||
cp -RP build/bin build/lib build/include LICENSE README.md $(PACKAGE_DIR)/
|
||||
chmod -R 755 $(PACKAGE_DIR)
|
||||
chmod 644 $(PACKAGE_DIR)/lib/* $(PACKAGE_DIR)/LICENSE $(PACKAGE_DIR)/*.md
|
||||
chmod 644 $(PACKAGE_DIR)/lib/* $(PACKAGE_DIR)/include/* $(PACKAGE_DIR)/LICENSE $(PACKAGE_DIR)/README.md
|
||||
|
||||
ifeq ($(OS_TAG),linux)
|
||||
$(STRIP) --only-keep-debug build/$(LIB_PROFILER) -o $(DEBUG_PACKAGE_DIR)/$(LIB_PROFILER_DEBUG)
|
||||
$(STRIP) -g $@/$(LIB_PROFILER)
|
||||
$(OBJCOPY) --add-gnu-debuglink=$(DEBUG_PACKAGE_DIR)/$(LIB_PROFILER_DEBUG) $@/$(LIB_PROFILER)
|
||||
chmod 644 $(DEBUG_PACKAGE_DIR)/*
|
||||
endif
|
||||
|
||||
build/%:
|
||||
mkdir -p $@
|
||||
|
||||
build/$(ASPROF): src/main/* src/jattach/* src/fdtransfer.h
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) -DPROFILER_VERSION=\"$(PROFILER_VERSION)\" -o $@ src/main/*.cpp src/jattach/*.c
|
||||
strip $@
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) $(DEFS) -o $@ src/main/*.cpp src/jattach/*.c
|
||||
$(STRIP) $@
|
||||
|
||||
build/$(JFRCONV): src/launcher/* build/$(CONVERTER_JAR)
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) -DPROFILER_VERSION=\"$(PROFILER_VERSION)\" -o $@ src/launcher/*.cpp
|
||||
strip $@
|
||||
$(SKIP_IN_RELEASE) cat build/$(CONVERTER_JAR) >> $@
|
||||
build/$(JFRCONV): src/launcher/launcher.sh build/$(CONVERTER_JAR)
|
||||
sed -e 's/PROFILER_VERSION/$(PROFILER_VERSION)/g' -e 's/BUILD_DATE/$(shell date "+%b %d %Y")/g' src/launcher/launcher.sh > $@
|
||||
chmod +x $@
|
||||
cat build/$(CONVERTER_JAR) >> $@
|
||||
|
||||
build/$(LIB_PROFILER): $(SOURCES) $(HEADERS) $(RESOURCES) $(JAVA_HELPER_CLASSES)
|
||||
ifeq ($(MERGE),true)
|
||||
for f in src/*.cpp; do echo '#include "'$$f'"'; done |\
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -DPROFILER_VERSION=\"$(PROFILER_VERSION)\" $(INCLUDES) -fPIC -shared -o $@ -xc++ - $(LIBS)
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) -fPIC -shared -o $@ -xc++ - $(LIBS)
|
||||
else
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -DPROFILER_VERSION=\"$(PROFILER_VERSION)\" $(INCLUDES) -fPIC -shared -o $@ $(SOURCES) $(LIBS)
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) -fPIC -shared -o $@ $(SOURCES) $(LIBS)
|
||||
endif
|
||||
|
||||
build/$(API_JAR): $(API_SOURCES)
|
||||
build/$(ASPROF_HEADER): src/asprof.h
|
||||
mkdir -p build/include
|
||||
cp -f $< build/include
|
||||
|
||||
build/$(API_JAR): $(API_SOURCES) $(JAR_MANIFEST)
|
||||
mkdir -p build/api
|
||||
$(JAVAC) $(JAVAC_OPTIONS) -d build/api $(API_SOURCES)
|
||||
$(JAR) cf $@ -C build/api .
|
||||
$(JAR) cfm $@ $(JAR_MANIFEST) -C build/api .
|
||||
$(RM) -r build/api
|
||||
|
||||
build/$(CONVERTER_JAR): $(CONVERTER_SOURCES) $(RESOURCES)
|
||||
mkdir -p build/converter
|
||||
$(JAVAC) $(JAVAC_OPTIONS) -d build/converter $(CONVERTER_SOURCES)
|
||||
$(JAR) cfe $@ Main -C build/converter . -C src/res .
|
||||
$(JAR) cfe $@ one.convert.Main -C build/converter . -C src/res .
|
||||
$(RM) -r build/converter
|
||||
|
||||
%.class: %.java
|
||||
$(JAVAC) -source 7 -target 7 -Xlint:-options -g:none $^
|
||||
$(JAVAC) -source $(JAVA_TARGET) -target $(JAVA_TARGET) -Xlint:-options -g:none $^
|
||||
|
||||
test: all
|
||||
test/smoke-test.sh
|
||||
test/thread-smoke-test.sh
|
||||
test/alloc-smoke-test.sh
|
||||
test/load-library-test.sh
|
||||
test/fdtransfer-smoke-test.sh
|
||||
echo "All tests passed"
|
||||
build/test/cpptests: $(CPP_TEST_SOURCES) $(CPP_TEST_HEADER) $(SOURCES) $(HEADERS) $(RESOURCES) $(JAVA_HELPER_CLASSES)
|
||||
mkdir -p build/test
|
||||
ifeq ($(MERGE),true)
|
||||
for f in src/*.cpp test/native/*.cpp; do echo '#include "'$$f'"'; done |\
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) $(CPP_TEST_INCLUDES) -fPIC -o $@ -xc++ - $(LIBS)
|
||||
else
|
||||
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $(DEFS) $(INCLUDES) $(CPP_TEST_INCLUDES) -fPIC -o $@ $(SOURCES) $(CPP_TEST_SOURCES) $(LIBS)
|
||||
endif
|
||||
|
||||
native:
|
||||
mkdir -p native/linux-x64 native/linux-arm64 native/macos
|
||||
tar xfO async-profiler-$(PROFILER_VERSION)-linux-x64.tar.gz */build/libasyncProfiler.so > native/linux-x64/libasyncProfiler.so
|
||||
tar xfO async-profiler-$(PROFILER_VERSION)-linux-arm64.tar.gz */build/libasyncProfiler.so > native/linux-arm64/libasyncProfiler.so
|
||||
unzip -p async-profiler-$(PROFILER_VERSION)-macos.zip */build/libasyncProfiler.so > native/macos/libasyncProfiler.so
|
||||
build-test-java: all build/$(TEST_JAR) build/test/build-test-libs build/test/build-test-bins
|
||||
|
||||
build-test-cpp: build/test/cpptests build/test/build-test-libs
|
||||
|
||||
build-test: build-test-cpp build-test-java
|
||||
|
||||
build/test/build-test-libs: $(TEST_LIB_SOURCES)
|
||||
@mkdir -p $(TEST_LIB_DIR)
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libreladyn.$(SOEXT) test/native/libs/reladyn.c
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libcallsmalloc.$(SOEXT) test/native/libs/callsmalloc.c
|
||||
$(CC) -shared -fPIC $(INCLUDES) -Isrc -o $(TEST_LIB_DIR)/libjnimalloc.$(SOEXT) test/native/libs/jnimalloc.c
|
||||
$(CC) -shared -fPIC -o $(TEST_LIB_DIR)/libmalloc.$(SOEXT) test/native/libs/malloc.c
|
||||
$(CC) -fno-optimize-sibling-calls -shared -fPIC $(INCLUDES) -Isrc -o $(TEST_LIB_DIR)/libjninativestacks.$(SOEXT) test/native/libs/jninativestacks.c
|
||||
$(CC) -shared -fPIC $(INCLUDES) -Isrc -o $(TEST_LIB_DIR)/libjninativelocks.$(SOEXT) test/native/libs/jninativelocks.c -lpthread
|
||||
|
||||
ifeq ($(OS_TAG),linux)
|
||||
$(CC) -c -shared -fPIC -o $(TEST_LIB_DIR)/vaddrdif.o test/native/libs/vaddrdif.c
|
||||
$(LD) -N -shared -o $(TEST_LIB_DIR)/libvaddrdif.$(SOEXT) $(TEST_LIB_DIR)/vaddrdif.o -T test/native/libs/vaddrdif.ld
|
||||
|
||||
$(AS) -o $(TEST_LIB_DIR)/multiplematching.o test/native/libs/multiplematching.s
|
||||
$(LD) -shared -o $(TEST_LIB_DIR)/multiplematching.$(SOEXT) $(TEST_LIB_DIR)/multiplematching.o
|
||||
|
||||
$(AS) -o $(TEST_LIB_DIR)/twiceatzero.o test/native/libs/twiceatzero.s
|
||||
$(LD) -shared -o $(TEST_LIB_DIR)/libtwiceatzero.$(SOEXT) $(TEST_LIB_DIR)/twiceatzero.o --section-start=.seg1=0x4000 -z max-page-size=0x1000
|
||||
endif
|
||||
@touch $@
|
||||
|
||||
build/test/build-test-bins: $(TEST_BIN_SOURCES)
|
||||
@mkdir -p $(TEST_BIN_DIR)
|
||||
$(CC) -o $(TEST_BIN_DIR)/malloc_plt_dyn test/test/nativemem/malloc_plt_dyn.c
|
||||
$(CC) -o $(TEST_BIN_DIR)/native_api -Isrc test/test/c/native_api.c -ldl
|
||||
$(CC) -o $(TEST_BIN_DIR)/native_lock_contention test/test/nativelock/native_lock_contention.c -lpthread
|
||||
$(CC) -o $(TEST_BIN_DIR)/profile_with_dlopen -Isrc test/test/nativemem/profile_with_dlopen.c -ldl
|
||||
$(CC) -o $(TEST_BIN_DIR)/preload_malloc -Isrc test/test/nativemem/preload_malloc.c -ldl
|
||||
$(CC) -o $(TEST_BIN_DIR)/nativemem_known_lib_crash -Isrc test/test/nativemem/nativemem_known_lib_crash.c -ldl
|
||||
$(CXX) -o $(TEST_BIN_DIR)/non_java_app -std=c++11 $(INCLUDES) $(CPP_TEST_INCLUDES) test/test/nonjava/non_java_app.cpp $(LIBS)
|
||||
@touch $@
|
||||
|
||||
test-cpp: build-test-cpp
|
||||
echo "Running cpp tests..."
|
||||
LD_LIBRARY_PATH="$(TEST_LIB_DIR)" DYLD_LIBRARY_PATH="$(TEST_LIB_DIR)" build/test/cpptests
|
||||
|
||||
test-java: build-test-java
|
||||
echo "Running tests against $(LIB_PROFILER)"
|
||||
$(TEST_JAVA) $(TEST_FLAGS) -ea -cp "build/$(TEST_JAR):build/jar/*:$(TEST_DEPS_DIR)/*:$(TEST_GEN_DIR)/*" one.profiler.test.Runner $(subst $(COMMA), ,$(TESTS))
|
||||
|
||||
coverage: override FAT_BINARY=false
|
||||
coverage: clean-coverage
|
||||
$(MAKE) test-cpp CXXFLAGS_EXTRA="-fprofile-arcs -ftest-coverage -fPIC -O0 --coverage"
|
||||
mkdir -p build/test/coverage
|
||||
cd build/test/ && gcovr -r ../.. --html-details --gcov-executable "$(GCOV)" -o coverage/index.html
|
||||
rm -rf -- -.gc*
|
||||
|
||||
# unit tests shouldn't run if the user selects an integration test target
|
||||
ifeq ($(TESTS),)
|
||||
TEST_CPP := test-cpp
|
||||
endif
|
||||
|
||||
test: $(TEST_CPP) test-java
|
||||
|
||||
$(TEST_DEPS_DIR):
|
||||
mkdir -p $@
|
||||
|
||||
build/$(TEST_JAR): build/$(API_JAR) $(TEST_SOURCES) build/$(CONVERTER_JAR) $(TEST_DEPS_DIR)
|
||||
rm -rf build/test/classes
|
||||
mkdir -p build/test/classes
|
||||
$(JAVAC) -source $(JAVA_TARGET) -target $(JAVA_TARGET) -Xlint:-options -XDignore.symbol.file \
|
||||
-implicit:none \
|
||||
-cp "build/jar/*:$(TEST_DEPS_DIR)/*:$(TEST_GEN_DIR)/*:test/stubs" \
|
||||
-d build/test/classes \
|
||||
$(TEST_SOURCES)
|
||||
$(JAR) cf $@ -C build/test/classes .
|
||||
|
||||
update-otlp-classes-jar:
|
||||
@if [ -z "$(OTEL_PROTO_PATH)" ]; then \
|
||||
echo "'OTEL_PROTO_PATH' is empty"; \
|
||||
exit 1; \
|
||||
fi
|
||||
rm -rf $(TMP_DIR)/gen/java $(TMP_DIR)/build
|
||||
mkdir -p $(TMP_DIR)/gen/java $(TMP_DIR)/build $(TEST_GEN_DIR)
|
||||
cd $(OTEL_PROTO_PATH) && protoc --java_out=$(TMP_DIR)/gen/java $$(find . \
|
||||
-type f \
|
||||
-name '*.proto' \
|
||||
-not \( -name 'logs*.proto' -o -name 'metrics*.proto' -o -name 'trace*.proto' -o -name '*service.proto' \))
|
||||
$(JAVAC) -source $(JAVA_TARGET) \
|
||||
-target $(JAVA_TARGET) \
|
||||
-cp $(TEST_DEPS_DIR)/* \
|
||||
-d $(TMP_DIR)/build \
|
||||
-Xlint:-options \
|
||||
$$(find $(TMP_DIR)/gen/java -name "*.java")
|
||||
$(JAR) cvf $(TEST_GEN_DIR)/opentelemetry-gen-classes.jar -C $(TMP_DIR)/build .
|
||||
|
||||
LINT_SOURCES=`ls -1 src/*.cpp src/*/*.cpp | grep -v rustDemangle.cpp`
|
||||
CLANG_TIDY_ARGS_EXTRA=
|
||||
cpp-lint:
|
||||
clang-tidy $(LINT_SOURCES) $(CLANG_TIDY_ARGS_EXTRA) -- -x c++ $(CXXFLAGS) $(INCLUDES) $(DEFS) $(LIBS)
|
||||
|
||||
DIFF_BASE=
|
||||
cpp-lint-diff:
|
||||
git diff -U0 $(DIFF_BASE) -- 'src/*.cpp' 'src/**/*.cpp' 'src/*.h' 'src/**/*.h' ':!**/rustDemangle.cpp' | \
|
||||
clang-tidy-diff.py -p1 $(CLANG_TIDY_ARGS_EXTRA) -- -x c++ $(CXXFLAGS) $(INCLUDES) $(DEFS) $(LIBS)
|
||||
|
||||
check-md:
|
||||
prettier -c README.md "docs/**/*.md"
|
||||
|
||||
format-md:
|
||||
prettier -w README.md "docs/**/*.md"
|
||||
|
||||
clean-coverage:
|
||||
$(RM) -rf build/test/cpptests build/test/coverage
|
||||
|
||||
clean:
|
||||
$(RM) -r build
|
||||
|
||||
703
README.md
@@ -1,638 +1,117 @@
|
||||
# async-profiler
|
||||
# Async-profiler
|
||||
|
||||
This project is a low overhead sampling profiler for Java
|
||||
that does not suffer from [Safepoint bias problem](http://psy-lob-saw.blogspot.ru/2016/02/why-most-sampling-java-profilers-are.html).
|
||||
It features HotSpot-specific APIs to collect stack traces
|
||||
that does not suffer from the [Safepoint bias problem](http://psy-lob-saw.blogspot.ru/2016/02/why-most-sampling-java-profilers-are.html).
|
||||
It features HotSpot-specific API to collect stack traces
|
||||
and to track memory allocations. The profiler works with
|
||||
OpenJDK and other Java runtimes based on the HotSpot JVM.
|
||||
|
||||
async-profiler can trace the following kinds of events:
|
||||
- CPU cycles
|
||||
- Hardware and Software performance counters like cache misses, branch misses, page faults, context switches etc.
|
||||
- Allocations in Java Heap
|
||||
- Contented lock attempts, including both Java object monitors and ReentrantLocks
|
||||
Unlike traditional Java profilers, async-profiler monitors non-Java threads
|
||||
(e.g., GC and JIT compiler threads) and shows native and kernel frames in stack traces.
|
||||
|
||||
See our [Wiki](https://github.com/async-profiler/async-profiler/wiki) or
|
||||
[3 hours playlist](https://www.youtube.com/playlist?list=PLNCLTEx3B8h4Yo_WvKWdLvI9mj1XpTKBr)
|
||||
to learn about all features.
|
||||
What can be profiled:
|
||||
|
||||
## Download
|
||||
- CPU time
|
||||
- Allocations in Java Heap
|
||||
- Native memory allocations and leaks
|
||||
- Contended locks
|
||||
- Hardware and software performance counters like cache misses, page faults, context switches
|
||||
- and [more](docs/ProfilingModes.md).
|
||||
|
||||
Current release (3.0):
|
||||
See our [3 hours playlist](https://www.youtube.com/playlist?list=PLNCLTEx3B8h4Yo_WvKWdLvI9mj1XpTKBr)
|
||||
to learn about more features.
|
||||
|
||||
- Linux x64: [async-profiler-3.0-linux-x64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v3.0/async-profiler-3.0-linux-x64.tar.gz)
|
||||
- Linux arm64: [async-profiler-3.0-linux-arm64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v3.0/async-profiler-3.0-linux-arm64.tar.gz)
|
||||
- macOS x64/arm64: [async-profiler-3.0-macos.zip](https://github.com/async-profiler/async-profiler/releases/download/v3.0/async-profiler-3.0-macos.zip)
|
||||
- Converters between profile formats: [converter.jar](https://github.com/async-profiler/async-profiler/releases/download/v3.0/converter.jar)
|
||||
(JFR to Flame Graph, JFR to pprof, collapsed stacks to Flame Graph)
|
||||
# Download
|
||||
|
||||
[Previous releases](https://github.com/async-profiler/async-profiler/releases)
|
||||
### Stable release: [4.3](https://github.com/async-profiler/async-profiler/releases/tag/v4.3)
|
||||
|
||||
async-profiler also comes bundled with IntelliJ IDEA Ultimate 2018.3 and later.
|
||||
For more information refer to [IntelliJ IDEA documentation](https://www.jetbrains.com/help/idea/cpu-and-allocation-profiling-basic-concepts.html).
|
||||
- Linux x64: [async-profiler-4.3-linux-x64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v4.3/async-profiler-4.3-linux-x64.tar.gz)
|
||||
- Linux arm64: [async-profiler-4.3-linux-arm64.tar.gz](https://github.com/async-profiler/async-profiler/releases/download/v4.3/async-profiler-4.3-linux-arm64.tar.gz)
|
||||
- macOS arm64/x64: [async-profiler-4.3-macos.zip](https://github.com/async-profiler/async-profiler/releases/download/v4.3/async-profiler-4.3-macos.zip)
|
||||
- Profile converters: [jfr-converter.jar](https://github.com/async-profiler/async-profiler/releases/download/v4.3/jfr-converter.jar)
|
||||
|
||||
## Supported platforms
|
||||
### Nightly builds
|
||||
|
||||
[The most recent binaries](https://github.com/async-profiler/async-profiler/releases/tag/nightly) corresponding
|
||||
to the latest successful commit in `master`.
|
||||
|
||||
For a build corresponding to one of the previous commits, go to
|
||||
[Nightly Builds](https://github.com/async-profiler/async-profiler/actions/workflows/test-and-publish-nightly.yml),
|
||||
click the desired build and scroll down to the artifacts section. These binaries are kept for 30 days.
|
||||
|
||||
# Quick start
|
||||
|
||||
In a typical use case, profiling a Java application is just a matter of a running `asprof` with a PID of a
|
||||
running Java process.
|
||||
|
||||
```
|
||||
$ asprof -d 30 -f flamegraph.html <PID>
|
||||
```
|
||||
|
||||
The above command translates to: run profiler for 30 seconds and save results to `flamegraph.html`
|
||||
as an interactive [Flame Graph](docs/FlamegraphInterpretation.md) that can be viewed in a browser.
|
||||
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
Find more details in the [Getting started guide](docs/GettingStarted.md).
|
||||
|
||||
# Building
|
||||
|
||||
### Build status
|
||||
|
||||
[](https://github.com/async-profiler/async-profiler/actions/workflows/test-and-publish-nightly.yml)
|
||||
|
||||
### Minimum requirements
|
||||
|
||||
- make
|
||||
- GCC 7.5.0+ or Clang 7.0.0+
|
||||
- Static version of libstdc++ (e.g. on Amazon Linux 2023: `yum install libstdc++-static`)
|
||||
- JDK 11+
|
||||
|
||||
### How to build
|
||||
|
||||
Make sure `gcc`, `g++` and `java` are available on the `PATH`.
|
||||
Navigate to the root directory with async-profiler sources and run `make`.
|
||||
async-profiler launcher will be available at `build/bin/asprof`.
|
||||
|
||||
Other Makefile targets:
|
||||
|
||||
- `make test` - run unit and integration tests;
|
||||
- `make release` - package async-profiler binaries as `.tar.gz` (Linux) or `.zip` (macOS).
|
||||
|
||||
### Supported platforms
|
||||
|
||||
| | Officially maintained builds | Other available ports |
|
||||
|-----------|------------------------------|-------------------------------------------|
|
||||
| --------- | ---------------------------- | ----------------------------------------- |
|
||||
| **Linux** | x64, arm64 | x86, arm32, ppc64le, riscv64, loongarch64 |
|
||||
| **macOS** | x64, arm64 | |
|
||||
|
||||
## CPU profiling
|
||||
# Documentation
|
||||
|
||||
In this mode profiler collects stack trace samples that include **Java** methods,
|
||||
**native** calls, **JVM** code and **kernel** functions.
|
||||
## Basic usage
|
||||
|
||||
The general approach is receiving call stacks generated by `perf_events`
|
||||
and matching them up with call stacks generated by `AsyncGetCallTrace`,
|
||||
in order to produce an accurate profile of both Java and native code.
|
||||
Additionally, async-profiler provides a workaround to recover stack traces
|
||||
in some [corner cases](https://bugs.openjdk.java.net/browse/JDK-8178287)
|
||||
where `AsyncGetCallTrace` fails.
|
||||
- [Getting Started](docs/GettingStarted.md)
|
||||
- [Profiler Options](docs/ProfilerOptions.md)
|
||||
- [Profiling Modes](docs/ProfilingModes.md)
|
||||
- [Integrating async-profiler](docs/IntegratingAsyncProfiler.md)
|
||||
- [Profiling In Container](docs/ProfilingInContainer.md)
|
||||
|
||||
This approach has the following advantages compared to using `perf_events`
|
||||
directly with a Java agent that translates addresses to Java method names:
|
||||
## Profiler output
|
||||
|
||||
* Does not require `-XX:+PreserveFramePointer`, which introduces
|
||||
performance overhead that can be sometimes as high as 10%.
|
||||
- [Output Formats](docs/OutputFormats.md)
|
||||
- [FlameGraph Interpretation](docs/FlamegraphInterpretation.md)
|
||||
- [JFR Visualization](docs/JfrVisualization.md)
|
||||
- [Converter Usage](docs/ConverterUsage.md)
|
||||
- [Heatmap](docs/Heatmap.md)
|
||||
|
||||
* Does not require generating a map file for translating Java code addresses
|
||||
to method names.
|
||||
## Advanced usage
|
||||
|
||||
* Displays interpreter frames.
|
||||
|
||||
* Does not produce large intermediate files (perf.data) for further processing in
|
||||
user space scripts.
|
||||
|
||||
If you wish to resolve frames within `libjvm`, the [debug symbols](#installing-debug-symbols) are required.
|
||||
|
||||
## ALLOCATION profiling
|
||||
|
||||
The profiler can be configured to collect call sites where the largest amount
|
||||
of heap memory is allocated.
|
||||
|
||||
async-profiler does not use intrusive techniques like bytecode instrumentation
|
||||
or expensive DTrace probes which have significant performance impact.
|
||||
It also does not affect Escape Analysis or prevent from JIT optimizations
|
||||
like allocation elimination. Only actual heap allocations are measured.
|
||||
|
||||
The profiler features TLAB-driven sampling. It relies on HotSpot-specific
|
||||
callbacks to receive two kinds of notifications:
|
||||
- when an object is allocated in a newly created TLAB;
|
||||
- when an object is allocated on a slow path outside TLAB.
|
||||
|
||||
Sampling interval can be adjusted with `--alloc` option.
|
||||
For example, `--alloc 500k` will take one sample after 500 KB of allocated
|
||||
space on average. Prior to JDK 11, intervals less than TLAB size will not take effect.
|
||||
|
||||
### Installing Debug Symbols
|
||||
|
||||
Prior to JDK 11, the allocation profiler required HotSpot debug symbols.
|
||||
Some OpenJDK distributions (Amazon Corretto, Liberica JDK, Azul Zulu)
|
||||
already have them embedded in `libjvm.so`, other OpenJDK builds typically
|
||||
provide debug symbols in a separate package. For example, to install
|
||||
OpenJDK debug symbols on Debian / Ubuntu, run:
|
||||
```
|
||||
# apt install openjdk-17-dbg
|
||||
```
|
||||
(replace `17` with the desired version of JDK).
|
||||
|
||||
On CentOS, RHEL and some other RPM-based distributions, this could be done with
|
||||
[debuginfo-install](http://man7.org/linux/man-pages/man1/debuginfo-install.1.html) utility:
|
||||
```
|
||||
# debuginfo-install java-1.8.0-openjdk
|
||||
```
|
||||
|
||||
On Gentoo the `icedtea` OpenJDK package can be built with the per-package setting
|
||||
`FEATURES="nostrip"` to retain symbols.
|
||||
|
||||
The `gdb` tool can be used to verify if debug symbols are properly installed for the `libjvm` library.
|
||||
For example, on Linux:
|
||||
```
|
||||
$ gdb $JAVA_HOME/lib/server/libjvm.so -ex 'info address UseG1GC'
|
||||
```
|
||||
This command's output will either contain `Symbol "UseG1GC" is at 0xxxxx`
|
||||
or `No symbol "UseG1GC" in current context`.
|
||||
|
||||
## Wall-clock profiling
|
||||
|
||||
`-e wall` option tells async-profiler to sample all threads equally every given
|
||||
period of time regardless of thread status: Running, Sleeping or Blocked.
|
||||
For instance, this can be helpful when profiling application start-up time.
|
||||
|
||||
Wall-clock profiler is most useful in per-thread mode: `-t`.
|
||||
|
||||
Example: `asprof -e wall -t -i 5ms -f result.html 8983`
|
||||
|
||||
## Java method profiling
|
||||
|
||||
`-e ClassName.methodName` option instruments the given Java method
|
||||
in order to record all invocations of this method with the stack traces.
|
||||
|
||||
Example: `-e java.util.Properties.getProperty` will profile all places
|
||||
where `getProperty` method is called from.
|
||||
|
||||
Only non-native Java methods are supported. To profile a native method,
|
||||
use hardware breakpoint event instead, e.g. `-e Java_java_lang_Throwable_fillInStackTrace`
|
||||
|
||||
**Be aware** that if you attach async-profiler at runtime, the first instrumentation
|
||||
of a non-native Java method may cause the [deoptimization](https://github.com/openjdk/jdk/blob/bf2e9ee9d321ed289466b2410f12ad10504d01a2/src/hotspot/share/prims/jvmtiRedefineClasses.cpp#L4092-L4096)
|
||||
of all compiled methods. The subsequent instrumentation flushes only the _dependent code_.
|
||||
|
||||
The massive CodeCache flush doesn't occur if attaching async-profiler as an agent.
|
||||
|
||||
Here are some useful native methods that you may want to profile:
|
||||
* ```G1CollectedHeap::humongous_obj_allocate``` - trace _humongous allocations_ of the G1 GC,
|
||||
* ```JVM_StartThread``` - trace creation of new Java threads,
|
||||
* ```Java_java_lang_ClassLoader_defineClass1``` - trace class loading.
|
||||
|
||||
## Building
|
||||
|
||||
Build status: [](https://github.com/async-profiler/async-profiler/actions/workflows/ci.yml)
|
||||
|
||||
Make sure the `JAVA_HOME` environment variable points to your JDK installation,
|
||||
and then run `make`. GCC or Clang is required. After building, the profiler binaries
|
||||
will be in the `build` subdirectory.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
As of Linux 4.6, capturing kernel call stacks using `perf_events` from a non-root
|
||||
process requires setting two runtime variables. You can set them using
|
||||
sysctl or as follows:
|
||||
|
||||
```
|
||||
# sysctl kernel.perf_event_paranoid=1
|
||||
# sysctl kernel.kptr_restrict=0
|
||||
```
|
||||
|
||||
async-profiler works in the context of the target Java application,
|
||||
i.e. it runs as an agent in the process being profiled.
|
||||
`asprof` is a tool to attach and control the agent.
|
||||
|
||||
A typical workflow would be to launch your Java application, attach
|
||||
the agent and start profiling, exercise your performance scenario, and
|
||||
then stop profiling. The agent's output, including the profiling results, will
|
||||
be displayed on the console where you've started `asprof`.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof start 8983
|
||||
$ asprof stop 8983
|
||||
```
|
||||
|
||||
The following may be used in lieu of the `pid` (8983):
|
||||
|
||||
- The keyword `jps`, which will use the most recently launched Java process.
|
||||
- The application name as it appears in the `jps` output: e.g. `Computey`
|
||||
|
||||
Alternatively, you may specify `-d` (duration) argument to profile
|
||||
the application for a fixed period of time with a single command.
|
||||
|
||||
```
|
||||
$ asprof -d 30 8983
|
||||
```
|
||||
|
||||
By default, the profiling frequency is 100Hz (every 10ms of CPU time).
|
||||
Here is a sample of the output printed to the Java application's terminal:
|
||||
|
||||
```
|
||||
--- Execution profile ---
|
||||
Total samples: 687
|
||||
Unknown (native): 1 (0.15%)
|
||||
|
||||
--- 6790000000 (98.84%) ns, 679 samples
|
||||
[ 0] Primes.isPrime
|
||||
[ 1] Primes.primesThread
|
||||
[ 2] Primes.access$000
|
||||
[ 3] Primes$1.run
|
||||
[ 4] java.lang.Thread.run
|
||||
|
||||
... a lot of output omitted for brevity ...
|
||||
|
||||
ns percent samples top
|
||||
---------- ------- ------- ---
|
||||
6790000000 98.84% 679 Primes.isPrime
|
||||
40000000 0.58% 4 __do_softirq
|
||||
|
||||
... more output omitted ...
|
||||
```
|
||||
|
||||
This indicates that the hottest method was `Primes.isPrime`, and the hottest
|
||||
call stack leading to it comes from `Primes.primesThread`.
|
||||
|
||||
## Launching as an Agent
|
||||
|
||||
If you need to profile some code as soon as the JVM starts up, instead of using the `asprof`,
|
||||
it is possible to attach async-profiler as an agent on the command line. For example:
|
||||
|
||||
```
|
||||
$ java -agentpath:/path/to/libasyncProfiler.so=start,event=cpu,file=profile.html ...
|
||||
```
|
||||
|
||||
Agent library is configured through the JVMTI argument interface.
|
||||
The format of the arguments string is described
|
||||
[in the source code](https://github.com/async-profiler/async-profiler/blob/v3.0/src/arguments.cpp#L44).
|
||||
`asprof` actually converts command line arguments to that format.
|
||||
|
||||
For instance, `-e wall` is converted to `event=wall`, `-f profile.html`
|
||||
is converted to `file=profile.html`, and so on. However, some arguments are processed
|
||||
directly by `asprof`. E.g. `-d 5` results in 3 actions:
|
||||
attaching profiler agent with start command, sleeping for 5 seconds,
|
||||
and then attaching the agent again with stop command.
|
||||
|
||||
## Multiple events
|
||||
|
||||
It is possible to profile CPU, allocations, and locks at the same time.
|
||||
Instead of CPU, you may choose any other execution event: wall-clock,
|
||||
perf event, tracepoint, Java method, etc.
|
||||
|
||||
The only output format that supports multiple events together is JFR.
|
||||
The recording will contain the following event types:
|
||||
- `jdk.ExecutionSample`
|
||||
- `jdk.ObjectAllocationInNewTLAB` (alloc)
|
||||
- `jdk.ObjectAllocationOutsideTLAB` (alloc)
|
||||
- `jdk.JavaMonitorEnter` (lock)
|
||||
- `jdk.ThreadPark` (lock)
|
||||
|
||||
To start profiling cpu + allocations + locks together, specify
|
||||
```
|
||||
asprof -e cpu,alloc,lock -f profile.jfr ...
|
||||
```
|
||||
or use `--alloc` and `--lock` parameters with the desired threshold:
|
||||
```
|
||||
asprof -e cpu --alloc 2m --lock 10ms -f profile.jfr ...
|
||||
```
|
||||
The same, when starting profiler as an agent:
|
||||
```
|
||||
-agentpath:/path/to/libasyncProfiler.so=start,event=cpu,alloc=2m,lock=10ms,file=profile.jfr
|
||||
```
|
||||
|
||||
## Flame Graph visualization
|
||||
|
||||
async-profiler provides out-of-the-box [Flame Graph](https://github.com/BrendanGregg/FlameGraph) support.
|
||||
Specify `-o flamegraph` argument to dump profiling results as an interactive HTML Flame Graph.
|
||||
Also, Flame Graph output format will be chosen automatically if the target filename ends with `.html`.
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof -d 30 -f /tmp/flamegraph.html 8983
|
||||
```
|
||||
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/demo/flamegraph.html)
|
||||
|
||||
## Profiler Options
|
||||
|
||||
`asprof` command-line options.
|
||||
|
||||
* `start` - starts profiling in semi-automatic mode, i.e. profiler will run
|
||||
until `stop` command is explicitly called.
|
||||
|
||||
* `resume` - starts or resumes earlier profiling session that has been stopped.
|
||||
All the collected data remains valid. The profiling options are not preserved
|
||||
between sessions, and should be specified again.
|
||||
|
||||
* `stop` - stops profiling and prints the report.
|
||||
|
||||
* `dump` - dump collected data without stopping profiling session.
|
||||
|
||||
* `check` - check if the specified profiling event is available.
|
||||
|
||||
* `status` - prints profiling status: whether profiler is active and
|
||||
for how long.
|
||||
|
||||
* `meminfo` - prints used memory statistics.
|
||||
|
||||
* `list` - show the list of profiling events available for the target process
|
||||
(if PID is specified) or for the default JVM.
|
||||
|
||||
* `-d N` - the profiling duration, in seconds. If no `start`, `resume`, `stop`
|
||||
or `status` option is given, the profiler will run for the specified period
|
||||
of time and then automatically stop.
|
||||
Example: `asprof -d 30 8983`
|
||||
|
||||
* `-e event` - the profiling event: `cpu`, `alloc`, `lock`, `cache-misses` etc.
|
||||
Use `list` to see the complete list of available events.
|
||||
|
||||
In allocation profiling mode the top frame of every call trace is the class
|
||||
of the allocated object, and the counter is the heap pressure (the total size
|
||||
of allocated TLABs or objects outside TLAB).
|
||||
|
||||
In lock profiling mode the top frame is the class of lock/monitor, and
|
||||
the counter is number of nanoseconds it took to enter this lock/monitor.
|
||||
|
||||
Two special event types are supported on Linux: hardware breakpoints
|
||||
and kernel tracepoints:
|
||||
- `-e mem:<func>[:rwx]` sets read/write/exec breakpoint at function
|
||||
`<func>`. The format of `mem` event is the same as in `perf-record`.
|
||||
Execution breakpoints can be also specified by the function name,
|
||||
e.g. `-e malloc` will trace all calls of native `malloc` function.
|
||||
- `-e trace:<id>` sets a kernel tracepoint. It is possible to specify
|
||||
tracepoint symbolic name, e.g. `-e syscalls:sys_enter_open` will trace
|
||||
all `open` syscalls.
|
||||
|
||||
* `-i N` - sets the profiling interval in nanoseconds or in other units,
|
||||
if N is followed by `ms` (for milliseconds), `us` (for microseconds),
|
||||
or `s` (for seconds). Only CPU active time is counted. No samples
|
||||
are collected while CPU is idle. The default is 10000000 (10ms).
|
||||
Example: `asprof -i 500us 8983`
|
||||
|
||||
* `--alloc N` - allocation profiling interval in bytes or in other units,
|
||||
if N is followed by `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
|
||||
|
||||
* `--live` - retain allocation samples with live objects only
|
||||
(object that have not been collected by the end of profiling session).
|
||||
Useful for finding Java heap memory leaks.
|
||||
|
||||
* `--lock N` - lock profiling threshold in nanoseconds (or other units).
|
||||
In lock profiling mode, record contended locks that the JVM has waited for
|
||||
longer than the specified duration.
|
||||
|
||||
* `-j N` - sets the maximum stack depth. The default is 2048.
|
||||
Example: `asprof -j 30 8983`
|
||||
|
||||
* `-t` - profile threads separately. Each stack trace will end with a frame
|
||||
that denotes a single thread.
|
||||
Example: `asprof -t 8983`
|
||||
|
||||
* `-s` - print simple class names instead of FQN.
|
||||
|
||||
* `-n` - normalize names of hidden classes / lambdas.
|
||||
|
||||
* `-g` - print method signatures.
|
||||
|
||||
* `-a` - annotate JIT compiled methods with `_[j]`, inlined methods with `_[i]`, interpreted methods with `_[0]` and C1 compiled methods with `_[1]`.
|
||||
|
||||
* `-l` - prepend library names to symbols, e.g. ``libjvm.so`JVM_DefineClassWithSource``.
|
||||
|
||||
* `-o fmt` - specifies what information to dump when profiling ends.
|
||||
`fmt` can be one of the following options:
|
||||
- `traces[=N]` - dump call traces (at most N samples);
|
||||
- `flat[=N]` - dump flat profile (top N hot methods);
|
||||
can be combined with `traces`, e.g. `traces=200,flat=200`
|
||||
- `jfr` - dump events in Java Flight Recorder format readable by Java Mission Control.
|
||||
This *does not* require JDK commercial features to be enabled.
|
||||
- `collapsed` - dump collapsed call traces in the format used by
|
||||
[FlameGraph](https://github.com/brendangregg/FlameGraph) script. This is
|
||||
a collection of call stacks, where each line is a semicolon separated list
|
||||
of frames followed by a counter.
|
||||
- `flamegraph` - produce Flame Graph in HTML format.
|
||||
- `tree` - produce Call Tree in HTML format.
|
||||
`--reverse` option will generate backtrace view.
|
||||
|
||||
* `--total` - count the total value of the collected metric instead of the number of samples,
|
||||
e.g. total allocation size.
|
||||
|
||||
* `--chunksize N`, `--chunktime N` - approximate size and time limits for a single JFR chunk.
|
||||
A new chunk will be started whenever either limit is reached.
|
||||
The default `chunksize` is 100MB, and the default `chunktime` is 1 hour.
|
||||
Example: `asprof -f profile.jfr --chunksize 100m --chunktime 1h 8983`
|
||||
|
||||
* `-I include`, `-X exclude` - filter stack traces by the given pattern(s).
|
||||
`-I` defines the name pattern that *must* be present in the stack traces,
|
||||
while `-X` is the pattern that *must not* occur in any of stack traces in the output.
|
||||
`-I` and `-X` options can be specified multiple times. A pattern may begin or end with
|
||||
a star `*` that denotes any (possibly empty) sequence of characters.
|
||||
Example: `asprof -I 'Primes.*' -I 'java/*' -X '*Unsafe.park*' 8983`
|
||||
|
||||
* `-L level` - log level: `debug`, `info`, `warn`, `error` or `none`.
|
||||
|
||||
* `-F features` - comma separated list of HotSpot-specific features
|
||||
to include in stack traces. Supported features are:
|
||||
- `vtable` - display targets of megamorphic virtual calls as an extra frame
|
||||
on top of `vtable stub` or `itable stub`.
|
||||
- `comptask` - display current compilation task (a Java method being compiled)
|
||||
in a JIT compiler stack trace.
|
||||
|
||||
* `--title TITLE`, `--minwidth PERCENT`, `--reverse` - FlameGraph parameters.
|
||||
Example: `asprof -f profile.html --title "Sample CPU profile" --minwidth 0.5 8983`
|
||||
|
||||
* `-f FILENAME` - the file name to dump the profile information to.
|
||||
`%p` in the file name is expanded to the PID of the target JVM;
|
||||
`%t` - to the timestamp;
|
||||
`%n{MAX}` - to the sequence number;
|
||||
`%{ENV}` - to the value of the given environment variable.
|
||||
Example: `asprof -o collapsed -f /tmp/traces-%t.txt 8983`
|
||||
|
||||
* `--loop TIME` - run profiler in a loop (continuous profiling).
|
||||
The argument is either a clock time (`hh:mm:ss`) or
|
||||
a loop duration in `s`econds, `m`inutes, `h`ours, or `d`ays.
|
||||
Make sure the filename includes a timestamp pattern, or the output
|
||||
will be overwritten on each iteration.
|
||||
Example: `asprof --loop 1h -f /var/log/profile-%t.jfr 8983`
|
||||
|
||||
* `--all-user` - include only user-mode events. This option is helpful when kernel profiling
|
||||
is restricted by `perf_event_paranoid` settings.
|
||||
|
||||
* `--sched` - group threads by Linux-specific scheduling policy: BATCH/IDLE/OTHER.
|
||||
|
||||
* `--cstack MODE` - how to walk native frames (C stack). Possible modes are
|
||||
`fp` (Frame Pointer), `dwarf` (DWARF unwind info),
|
||||
`lbr` (Last Branch Record, available on Haswell since Linux 4.1),
|
||||
`vm` (HotSpot VM Structs) and `no` (do not collect C stack).
|
||||
|
||||
By default, C stack is shown in cpu, ctimer, wall-clock and perf-events profiles.
|
||||
Java-level events like `alloc` and `lock` collect only Java stack.
|
||||
|
||||
* `--signal NUM` - use alternative signal for cpu or wall clock profiling.
|
||||
To change both signals, specify two numbers separated by a slash: `--signal SIGCPU/SIGWALL`.
|
||||
|
||||
* `--clock SOURCE` - clock source for JFR timestamps: `tsc` (default)
|
||||
or `monotonic` (equivalent for `CLOCK_MONOTONIC`).
|
||||
|
||||
* `--begin function`, `--end function` - automatically start/stop profiling
|
||||
when the specified native function is executed.
|
||||
|
||||
* `--ttsp` - time-to-safepoint profiling. An alias for
|
||||
`--begin SafepointSynchronize::begin --end RuntimeService::record_safepoint_synchronized`
|
||||
It is not a separate event type, but rather a constraint. Whatever event type
|
||||
you choose (e.g. `cpu` or `wall`), the profiler will work as usual, except that
|
||||
only events between the safepoint request and the start of the VM operation
|
||||
will be recorded.
|
||||
|
||||
* `--jfropts OPTIONS` - comma separated list of JFR recording options.
|
||||
Currently, the only available option is `mem` supported on Linux 3.17+.
|
||||
`mem` enables accumulating events in memory instead of flushing
|
||||
synchronously to a file.
|
||||
|
||||
* `--jfrsync CONFIG` - start Java Flight Recording with the given configuration
|
||||
synchronously with the profiler. The output .jfr file will include all regular
|
||||
JFR events, except that execution samples will be obtained from async-profiler.
|
||||
This option implies `-o jfr`.
|
||||
- `CONFIG` is a predefined JFR profile or a JFR configuration file (.jfc)
|
||||
or a list of JFR events started with `+`
|
||||
|
||||
Example: `asprof -e cpu --jfrsync profile -f combined.jfr 8983`
|
||||
|
||||
* `--fdtransfer` - runs a background process that provides access to perf_events
|
||||
to an unprivileged process. `--fdtransfer` is useful for profiling a process
|
||||
in a container (which lacks access to perf_events) from the host.
|
||||
See [Profiling Java in a container](#profiling-java-in-a-container).
|
||||
|
||||
* `-v`, `--version` - prints the version of profiler library. If PID is specified,
|
||||
gets the version of the library loaded into the given process.
|
||||
|
||||
## Profiling Java in a container
|
||||
|
||||
It is possible to profile Java processes running in a Docker or LXC container
|
||||
both from within a container and from the host system.
|
||||
|
||||
When profiling from the host, `pid` should be the Java process ID in the host
|
||||
namespace. Use `ps aux | grep java` or `docker top <container>` to find
|
||||
the process ID.
|
||||
|
||||
async-profiler should be run from the host by a privileged user - it will
|
||||
automatically switch to the proper pid/mount namespace and change
|
||||
user credentials to match the target process. Also make sure that
|
||||
the target container can access `libasyncProfiler.so` by the same
|
||||
absolute path as on the host.
|
||||
|
||||
By default, Docker container restricts the access to `perf_event_open`
|
||||
syscall. There are 3 alternatives to allow profiling in a container:
|
||||
1. You can modify the [seccomp profile](https://docs.docker.com/engine/security/seccomp/)
|
||||
or disable it altogether with `--security-opt seccomp=unconfined` option. In
|
||||
addition, `--cap-add SYS_ADMIN` may be required.
|
||||
2. You can use "fdtransfer": see the help for `--fdtransfer`.
|
||||
3. Last, you may fall back to `-e ctimer` profiling mode, see [Troubleshooting](#troubleshooting).
|
||||
|
||||
## Restrictions/Limitations
|
||||
|
||||
* macOS profiling is limited to user space code only.
|
||||
|
||||
* On most Linux systems, `perf_events` captures call stacks with a maximum depth
|
||||
of 127 frames. On recent Linux kernels, this can be configured using
|
||||
`sysctl kernel.perf_event_max_stack` or by writing to the
|
||||
`/proc/sys/kernel/perf_event_max_stack` file.
|
||||
|
||||
* Profiler allocates 8kB perf_event buffer for each thread of the target process.
|
||||
Make sure `/proc/sys/kernel/perf_event_mlock_kb` value is large enough
|
||||
(more than `8 * threads`) when running under unprivileged user.
|
||||
Otherwise the message _"perf_event mmap failed: Operation not permitted"_
|
||||
will be printed, and no native stack traces will be collected.
|
||||
|
||||
* You will not see the non-Java frames _preceding_ the Java frames on the
|
||||
stack, unless `--cstack vm` is specified.
|
||||
For example, if `start_thread` called `JavaMain` and then your Java
|
||||
code started running, you will not see the first two frames in the resulting
|
||||
stack. On the other hand, you _will_ see non-Java frames (user and kernel)
|
||||
invoked by your Java code.
|
||||
|
||||
* No Java stacks will be collected if `-XX:MaxJavaStackTraceDepth` is zero
|
||||
or negative. The exception is `--cstack vm` mode, which does not take
|
||||
`MaxJavaStackTraceDepth` into account.
|
||||
|
||||
* Too short profiling interval may cause continuous interruption of heavy
|
||||
system calls like `clone()`, so that it will never complete;
|
||||
see [#97](https://github.com/async-profiler/async-profiler/issues/97).
|
||||
The workaround is simply to increase the interval.
|
||||
|
||||
* When agent is not loaded at JVM startup (by using -agentpath option) it is
|
||||
highly recommended to use `-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints` JVM flags.
|
||||
Without those flags the profiler will still work correctly but results might be
|
||||
less accurate. For example, without `-XX:+DebugNonSafepoints` there is a high chance
|
||||
that simple inlined methods will not appear in the profile. When the agent is attached at runtime,
|
||||
`CompiledMethodLoad` JVMTI event enables debug info, but only for methods compiled after attaching.
|
||||
- [CPU Sampling Engines](docs/CpuSamplingEngines.md)
|
||||
- [Stack Walking Modes](docs/StackWalkingModes.md)
|
||||
- [Advanced Stacktrace Features](docs/AdvancedStacktraceFeatures.md)
|
||||
- [Profiling Non-Java Applications](docs/ProfilingNonJavaApplications.md)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
```
|
||||
Failed to change credentials to match the target process: Operation not permitted
|
||||
```
|
||||
Due to limitation of HotSpot Dynamic Attach mechanism, the profiler must be run
|
||||
by exactly the same user (and group) as the owner of target JVM process.
|
||||
If profiler is run by a different user, it will try to automatically change
|
||||
current user and group. This will likely succeed for `root`, but not for
|
||||
other users, resulting in the above error.
|
||||
|
||||
```
|
||||
Could not start attach mechanism: No such file or directory
|
||||
```
|
||||
The profiler cannot establish communication with the target JVM through UNIX domain socket.
|
||||
|
||||
Usually this happens in one of the following cases:
|
||||
1. Attach socket `/tmp/.java_pidNNN` has been deleted. It is a common
|
||||
practice to clean `/tmp` automatically with some scheduled script.
|
||||
Configure the cleanup software to exclude `.java_pid*` files from deletion.
|
||||
How to check: run `lsof -p PID | grep java_pid`
|
||||
If it lists a socket file, but the file does not exist, then this is exactly
|
||||
the described problem.
|
||||
2. JVM is started with `-XX:+DisableAttachMechanism` option.
|
||||
3. `/tmp` directory of Java process is not physically the same directory
|
||||
as `/tmp` of your shell, because Java is running in a container or in
|
||||
`chroot` environment. `jattach` attempts to solve this automatically,
|
||||
but it might lack the required permissions to do so.
|
||||
Check `strace build/jattach PID properties`
|
||||
4. JVM is busy and cannot reach a safepoint. For instance,
|
||||
JVM is in the middle of long-running garbage collection.
|
||||
How to check: run `kill -3 PID`. Healthy JVM process should print
|
||||
a thread dump and heap info in its console.
|
||||
|
||||
```
|
||||
Target JVM failed to load libasyncProfiler.so
|
||||
```
|
||||
The connection with the target JVM has been established, but JVM is unable to load profiler shared library.
|
||||
Make sure the user of JVM process has permissions to access `libasyncProfiler.so` by exactly the same absolute path.
|
||||
For more information see [#78](https://github.com/async-profiler/async-profiler/issues/78).
|
||||
|
||||
```
|
||||
No access to perf events. Try --fdtransfer or --all-user option or 'sysctl kernel.perf_event_paranoid=1'
|
||||
```
|
||||
or
|
||||
```
|
||||
Perf events unavailable
|
||||
```
|
||||
`perf_event_open()` syscall has failed.
|
||||
|
||||
Typical reasons include:
|
||||
1. `/proc/sys/kernel/perf_event_paranoid` is set to restricted mode (>=2).
|
||||
2. seccomp disables `perf_event_open` API in a container.
|
||||
3. OS runs under a hypervisor that does not virtualize performance counters.
|
||||
4. perf_event_open API is not supported on this system, e.g. WSL.
|
||||
|
||||
For permissions-related reasons (such as 1 and 2), using `--fdtransfer` while running the profiler
|
||||
as a privileged user may solve the issue.
|
||||
|
||||
If changing the configuration is not possible, you may fall back to
|
||||
`-e ctimer` profiling mode. It is similar to `cpu` mode, but does not
|
||||
require perf_events support. As a drawback, there will be no kernel
|
||||
stack traces.
|
||||
|
||||
```
|
||||
No AllocTracer symbols found. Are JDK debug symbols installed?
|
||||
```
|
||||
The OpenJDK debug symbols are required for allocation profiling.
|
||||
See [Installing Debug Symbols](#installing-debug-symbols) for more details.
|
||||
If the error message persists after a successful installation of the debug symbols,
|
||||
it is possible that the JDK was upgraded when installing the debug symbols.
|
||||
In this case, profiling any Java process which had started prior to the installation
|
||||
will continue to display this message, since the process had loaded
|
||||
the older version of the JDK which lacked debug symbols.
|
||||
Restarting the affected Java processes should resolve the issue.
|
||||
|
||||
```
|
||||
VMStructs unavailable. Unsupported JVM?
|
||||
```
|
||||
JVM shared library does not export `gHotSpotVMStructs*` symbols -
|
||||
apparently this is not a HotSpot JVM. Sometimes the same message
|
||||
can be also caused by an incorrectly built JDK
|
||||
(see [#218](https://github.com/async-profiler/async-profiler/issues/218)).
|
||||
In these cases installing JDK debug symbols may solve the problem.
|
||||
|
||||
```
|
||||
Could not parse symbols from <libname.so>
|
||||
```
|
||||
Async-profiler was unable to parse non-Java function names because of
|
||||
the corrupted contents in `/proc/[pid]/maps`. The problem is known to
|
||||
occur in a container when running Ubuntu with Linux kernel 5.x.
|
||||
This is an OS bug, see https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1843018.
|
||||
|
||||
```
|
||||
Could not open output file
|
||||
```
|
||||
Output file is written by the target JVM process, not by the profiler script.
|
||||
Make sure the path specified in `-f` option is correct and is accessible by the JVM.
|
||||
For known issues faced while running async-profiler and their detailed troubleshooting,
|
||||
please refer [here](docs/Troubleshooting.md).
|
||||
|
||||
9
SECURITY.md
Normal file
@@ -0,0 +1,9 @@
|
||||
## Reporting Security Issues
|
||||
|
||||
We take all security reports seriously.
|
||||
When we receive such reports,
|
||||
we will investigate and subsequently address
|
||||
any potential vulnerabilities as quickly as possible.
|
||||
If you discover a potential security issue in this project,
|
||||
please notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/).
|
||||
Please do *not* create a public GitHub issue in this project.
|
||||
|
Before Width: | Height: | Size: 68 KiB |
3
docker/alpaquita.Dockerfile
Normal file
@@ -0,0 +1,3 @@
|
||||
FROM public.ecr.aws/bellsoft/alpaquita-linux-gcc:15.2-musl
|
||||
|
||||
RUN apk add --no-cache liberica21-jdk util-linux-misc curl
|
||||
3
docker/alpine.Dockerfile
Normal file
@@ -0,0 +1,3 @@
|
||||
FROM public.ecr.aws/docker/library/amazoncorretto:11-alpine-jdk
|
||||
|
||||
RUN apk add --no-cache make gcc g++ linux-headers musl-dev util-linux patchelf gcovr bash tar curl
|
||||
37
docker/amazonlinux2.Dockerfile
Normal file
@@ -0,0 +1,37 @@
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2
|
||||
|
||||
RUN amazon-linux-extras enable python3.8
|
||||
|
||||
RUN yum update -y && yum install -y git make python38 gcc10 gcc10-c++ binutils tar
|
||||
|
||||
ARG node_version=20.19.1
|
||||
ARG node_sha256=babcd5b9e3216510b89305e6774bcdb2905ca98ff60028b67f163eb8296b6665
|
||||
RUN curl -L --output node.tar.gz https://github.com/nodejs/node/archive/refs/tags/v${node_version}.tar.gz
|
||||
RUN echo ${node_sha256} node.tar.gz | sha256sum -c
|
||||
RUN mkdir /node
|
||||
RUN tar xf node.tar.gz -C /node --strip-components=1
|
||||
WORKDIR /node
|
||||
|
||||
ENV CC=gcc10-cc
|
||||
ENV CXX=gcc10-c++
|
||||
RUN ./configure
|
||||
RUN make -j4 -s > /dev/null
|
||||
RUN make install
|
||||
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2
|
||||
|
||||
COPY --from=0 /usr/local/bin/node /usr/local/bin/node
|
||||
RUN amazon-linux-extras enable python3.8 && \
|
||||
yum update -y && \
|
||||
yum install -y gcc-c++ binutils make java-11-amazon-corretto patchelf tar python38 && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum && \
|
||||
python -m ensurepip && \
|
||||
python -m pip install gcovr
|
||||
|
||||
ENV NODE_JS_LOCATION=/__e/node20
|
||||
RUN cat <<EOF > /root/setup.sh
|
||||
#!/bin/sh
|
||||
mkdir -p "$NODE_JS_LOCATION/bin"
|
||||
ln --force --symbolic "/usr/local/bin/node" "$NODE_JS_LOCATION/bin/node"
|
||||
EOF
|
||||
8
docker/amazonlinux2023.Dockerfile
Normal file
@@ -0,0 +1,8 @@
|
||||
FROM public.ecr.aws/amazonlinux/amazonlinux:2023
|
||||
|
||||
RUN yum update -y && \
|
||||
yum install -y binutils findutils make tar gcc-c++ util-linux && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum && \
|
||||
python3 -m ensurepip && \
|
||||
python3 -m pip install gcovr
|
||||
10
docker/code-check.Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
# Image for all tasks related to static code analysis in async-profiler
|
||||
FROM public.ecr.aws/docker/library/amazoncorretto:11-alpine-jdk
|
||||
|
||||
ADD --chmod=555 https://raw.githubusercontent.com/llvm/llvm-project/67be4fe3d5fd986a3149de3806bcf2c92320015e/clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py /usr/bin/
|
||||
RUN apk add --no-cache clang-extra-tools linux-headers make python3 git py3-pip bash
|
||||
# Needed by clang-tidy-diff.py to merge multiple results in one file.
|
||||
# '--break-system-packages' is needed because Alpine does not like other package managers than 'apk' ('pip' in this case) to install
|
||||
# software globally, but it's safe to do in this case.
|
||||
RUN pip install --break-system-packages pyyaml
|
||||
ENV CPLUS_INCLUDE_PATH="/usr/lib/jvm/java-11-amazon-corretto/include:/usr/lib/jvm/java-11-amazon-corretto/include/linux"
|
||||
32
docker/debian.Dockerfile
Normal file
@@ -0,0 +1,32 @@
|
||||
# Image for building async-profiler release packages
|
||||
|
||||
# Stage 0: download and build musl
|
||||
FROM public.ecr.aws/debian/debian:10-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
sudo libicu-dev patchelf curl make g++ openjdk-11-jdk-headless gcovr && \
|
||||
rm -rf /var/cache/apt /var/lib/apt/lists/*
|
||||
|
||||
ARG musl_src=musl-1.2.5
|
||||
ARG musl_sha256=a9a118bbe84d8764da0ea0d28b3ab3fae8477fc7e4085d90102b8596fc7c75e4
|
||||
|
||||
ADD https://musl.libc.org/releases/${musl_src}.tar.gz /
|
||||
RUN echo ${musl_sha256} ${musl_src}.tar.gz | sha256sum -c
|
||||
|
||||
RUN ["/bin/bash", "-c", "\
|
||||
tar xfz ${musl_src}.tar.gz && \
|
||||
cd /${musl_src} && \
|
||||
./configure --disable-shared --prefix=/usr/local/musl && \
|
||||
make -j`nproc` && make install && make clean && \
|
||||
ln -s /usr/include/$(arch)-linux-gnu/asm /usr/include/{asm-generic,linux} /usr/local/musl/include/"]
|
||||
|
||||
# Stage 1: install build tools + copy musl toolchain from the previous step
|
||||
FROM public.ecr.aws/debian/debian:10-slim
|
||||
|
||||
# The following command should be exactly the same as at stage 0 to benefit from caching.
|
||||
# libicu-dev is needed for the github actions runner
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
sudo libicu-dev patchelf curl make g++ openjdk-11-jdk-headless gcovr && \
|
||||
rm -rf /var/cache/apt /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=0 /usr/local/musl /usr/local/musl
|
||||
35
docs/AdvancedStacktraceFeatures.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# Advanced Stacktrace Features
|
||||
|
||||
## Display JIT compilation task
|
||||
|
||||
Async-profiler samples JIT compiler threads just the same way as Java threads, and hence can show
|
||||
CPU percentage spent on JIT compilation. At the same time, Java methods are different:
|
||||
some take more resources to compile, others take less. Furthermore, there are cases when
|
||||
a bug in the C2 compiler causes a JIT thread to get stuck in an infinite loop consuming 100% CPU.
|
||||
Async-profiler can highlight which particular Java methods take most CPU time to compile.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F comptask` (or its agent equivalent `features=comptask`).
|
||||
|
||||
## Display actual implementation in vtable
|
||||
|
||||
In some applications, a significant amount of CPU time is spent on dispatching megamorphic virtual/interface calls.
|
||||
async-profiler shows a pseudo-frame on top of v/itable stub with the actual type of object the virtual method is
|
||||
called on. This should make clear the proportion of different receivers for the particular call site.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F vtable` (or its agent equivalent `features=vtable`).
|
||||
|
||||
## Display instruction addresses
|
||||
|
||||
Sometimes, for low-level performance analysis, it is important to know where exactly
|
||||
CPU time is spent inside a method. As an intermediate step to the instruction-level
|
||||
profiling, async-profiler provides an option to record PC address of the currently
|
||||
running method for each execution sample. In this case, each stack trace will include
|
||||
a synthetic frame with the address at the top of every stack trace.
|
||||
|
||||

|
||||
|
||||
The feature can be enabled with the option `-F pcaddr` (or its agent equivalent `features=pcaddr`).
|
||||
177
docs/ConverterUsage.md
Normal file
@@ -0,0 +1,177 @@
|
||||
# Converter Usage
|
||||
|
||||
async-profiler provides `jfrconv` utility to convert between different profile output formats.
|
||||
`jfrconv` can be found at the same location as the `asprof` binary. Converter is also available
|
||||
as a standalone Java application: [`jfr-converter.jar`](https://github.com/async-profiler/async-profiler/releases/latest/download/jfr-converter.jar).
|
||||
|
||||
## Supported conversions
|
||||
|
||||
The tool can convert several source formats into various outputs. The conversion capabilities are summarized below:
|
||||
|
||||
| Source format | to html | to collapsed | to pprof | to pb.gz | to heatmap | to otlp |
|
||||
| ------------- | ------- | ------------ | -------- | -------- | ---------- | ------- |
|
||||
| jfr | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| html | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
|
||||
| collapsed | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
jfrconv [options] <input> [<input>...] <output>
|
||||
```
|
||||
|
||||
The output format specified can be only one at a time for conversion from one format to another.
|
||||
|
||||
```
|
||||
Conversion options:
|
||||
-o --output FORMAT, -o can be omitted if the output file extension unambiguously determines the format, e.g. profile.collapsed
|
||||
|
||||
FORMAT can be any of the following:
|
||||
# collapsed: This is a collection of call stacks, where each line is a semicolon separated
|
||||
list of frames followed by a counter. This is used by the FlameGraph script to
|
||||
generate the FlameGraph visualization of the profile data.
|
||||
|
||||
# html: FlameGraph is a hierarchical representation of call traces of the profiled
|
||||
software in a color coded format that helps to identify a particular resource
|
||||
usage like CPU and memory for the application.
|
||||
|
||||
# pprof: pprof is a profiling visualization and analysis tool from Google. More details on
|
||||
pprof on the official github page https://github.com/google/pprof.
|
||||
|
||||
# pb.gz: This is a compressed version of pprof output.
|
||||
|
||||
# heatmap: A single page interactive heatmap that allows to explore profiling events
|
||||
on a timeline.
|
||||
|
||||
# otlp: OpenTelemetry profile format.
|
||||
|
||||
Differential Flame Graph:
|
||||
--diff <base-profile> <new-profile>
|
||||
|
||||
JFR options:
|
||||
--cpu Generate only CPU profile during conversion
|
||||
--cpu-time Generate only CPU profile, using CPUTimeSample events
|
||||
--wall Generate only Wall clock profile during conversion
|
||||
--alloc Generate only Allocation profile during conversion
|
||||
--live Build allocation profile from live objects only during conversion
|
||||
--nativemem Generate native memory allocation profile
|
||||
--leak Only include memory leaks in nativemem
|
||||
--tail RATIO Ignore tail allocations for leak profiling (10% by default)
|
||||
--lock Generate only lock contention profile during conversion
|
||||
--nativelock Generate only native (pthread) lock contention profile
|
||||
--trace Convert only MethodTrace events
|
||||
-t --threads Split stack traces by threads
|
||||
-s --state LIST Filter thread states: runnable, sleeping, default. State name is case insensitive
|
||||
and can be abbreviated, e.g. -s r
|
||||
--classify Classify samples into predefined categories
|
||||
--total Accumulate total value (time, bytes, etc.) instead of samples
|
||||
--lines Show line numbers
|
||||
--bci Show bytecode indices
|
||||
--simple Simple class names instead of fully qualified names
|
||||
--norm Normalize names of hidden classes/lambdas, e.g. Original JFR transforms
|
||||
lambda names to something like pkg.ClassName$$Lambda+0x00007f8177090218/543846639
|
||||
which gets normalized to pkg.ClassName$$Lambda
|
||||
--dot Dotted class names, e.g. java.lang.String instead of java/lang/String
|
||||
--from TIME Start time in ms (absolute or relative)
|
||||
--to TIME End time in ms (absolute or relative)
|
||||
TIME can be:
|
||||
# an absolute timestamp specified in millis since epoch;
|
||||
# an absolute time in hh:mm:ss or yyyy-MM-dd'T'hh:mm:ss format;
|
||||
# a relative time from the beginning of recording;
|
||||
# a relative time from the end of recording (a negative number).
|
||||
--latency MS Retain only samples within MethodTraces of at least MS milliseconds
|
||||
|
||||
Flame Graph options:
|
||||
--title STRING Convert to Flame Graph with provided title
|
||||
--minwidth X Skip frames smaller than X%
|
||||
--grain X Coarsen Flame Graph to the given grain size
|
||||
--skip N Skip N bottom frames
|
||||
-r --reverse Reverse stack traces (defaults to icicle graph)
|
||||
-i --inverted Toggles the layout for reversed stacktraces from icicle to flamegraph
|
||||
and for default stacktraces from flamegraph to icicle
|
||||
-I --include REGEX Include only stacks with the specified frames, e.g. -I 'MyApplication\.main' -I 'VMThread.*'
|
||||
-X --exclude REGEX Exclude stacks with the specified frames, e.g. -X '.*pthread_cond_(wait|timedwait).*'
|
||||
--highlight REGEX Highlight frames matching the given pattern
|
||||
```
|
||||
|
||||
See the [profiler options documentation](ProfilerOptions.md#options-applicable-to-flamegraph-and-tree-view-outputs-only) for details on the `--reverse` and `--inverted` options.
|
||||
|
||||
## jfrconv examples
|
||||
|
||||
`jfrconv` utility is provided in `bin` directory of the async-profiler package.
|
||||
It requires JRE to be installed on the system.
|
||||
|
||||
### Generate Flame Graph from JFR
|
||||
|
||||
If no output file is specified, it defaults to a Flame Graph output.
|
||||
|
||||
```
|
||||
jfrconv foo.jfr
|
||||
```
|
||||
|
||||
Profiling in JFR mode allows multi-mode profiling. So the command above will generate a Flame Graph
|
||||
output, however, for a multi-mode profile output with both `cpu` and `wall-clock` events, the
|
||||
Flame Graph will have an aggregation of both in the view. Such a view wouldn't make much sense and
|
||||
hence it is advisable to use JFR conversion filter options like `--cpu` to filter out events
|
||||
during a conversion.
|
||||
|
||||
```
|
||||
jfrconv --cpu foo.jfr
|
||||
|
||||
# which is equivalent to:
|
||||
# jfrconv --cpu -o html foo.jfr foo.html
|
||||
```
|
||||
|
||||
for HTML output as HTML is the default format for conversion from JFR.
|
||||
|
||||
### Flame Graph options
|
||||
|
||||
To add a custom title to the generated Flame Graph, use `--title`, which has the default value `Flame Graph`:
|
||||
|
||||
```
|
||||
jfrconv --cpu foo.jfr foo.html -r --title "Custom Title"
|
||||
```
|
||||
|
||||
### Differential Flame Graph
|
||||
|
||||
To find performance regressions, it may be useful to compare current profile
|
||||
to a previous one that serves as a baseline. Differential Flame Graph
|
||||
visualizes such a comparison with a special color scheme:
|
||||
|
||||
- Red color denotes frames with more samples compared to the baseline (i.e. regression);
|
||||
- Blue is for frames with fewer samples;
|
||||
- Yellow are new frames that were absent in the baseline.
|
||||
|
||||
The more intense the color, the larger the delta.
|
||||
For each different frame, the delta value is displayed in a tooltip.
|
||||
|
||||

|
||||
|
||||
Differential Flame Graph takes the shape of the current profile:
|
||||
all frames have exactly the same size as in the normal Flame Graph.
|
||||
This means, frames that exist only in the base profile will not be visible.
|
||||
To see such frames, create another differential Flame Graph,
|
||||
swapping the base and the current input file.
|
||||
|
||||
To create differential Flame Graph, run `jfrconv --diff` with two input files:
|
||||
baseline profile and new profile. Both files can be in JFR, HTML, or collapsed format.
|
||||
Other converter options work as usual.
|
||||
|
||||
```
|
||||
jfrconv --cpu --diff baseline.jfr new.jfr diff.html
|
||||
```
|
||||
|
||||
Output file name is optional. If omitted, `jfrconv` takes the name
|
||||
of the second input file, replacing its extension with `.diff.html`.
|
||||
|
||||
## Standalone converter examples
|
||||
|
||||
Standalone converter jar is provided in
|
||||
[Download](https://github.com/async-profiler/async-profiler/?tab=readme-ov-file#Download).
|
||||
It accepts the same parameters as `jfrconv`.
|
||||
|
||||
Below is an example usage:
|
||||
|
||||
```
|
||||
java -jar jfr-converter.jar --cpu foo.jfr --reverse --title "Application CPU profile"
|
||||
```
|
||||
76
docs/CpuSamplingEngines.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# CPU Sampling Engines
|
||||
|
||||
Async-profiler has three options for CPU profiling: `-e cpu`, `-e itimer` and `-e ctimer`.
|
||||
|
||||
## cpu
|
||||
|
||||
`cpu` mode measures CPU time spent by the running threads. For example,
|
||||
if an application uses 2 cpu cores, each with 30% utilization, and the sampling interval is
|
||||
10ms, then the profiler will collect about `2 * 0.3 * 100 = 60` samples per second.
|
||||
In other words, 1 profiling sample means that one CPU core was actively running for N nanoseconds,
|
||||
where N is the profiling interval.
|
||||
|
||||
On Linux, `cpu` mode relies on [perf_events](https://man7.org/linux/man-pages/man2/perf_event_open.2.html).
|
||||
One `perf_event` descriptor is created for each running thread and configured to generate a signal
|
||||
every `N` nanoseconds of CPU time. This is the most accurate CPU sampler available in async-profiler
|
||||
and the only one that can obtain kernel stack traces. It, however, comes with certain restrictions.
|
||||
|
||||
Most importantly, OS configuration may limit access to `perf_events` API, e.g.,
|
||||
by `kernel.perf_event_paranoid` sysctl or by seccomp (which is often the case in a Docker container).
|
||||
If `perf_events` are available, but kernel symbols are hidden (e.g., by the `kernel.kptr_restrict` setting),
|
||||
async-profiler continues to use `perf_events`, emits a warning and does not show kernel stack traces.
|
||||
|
||||
Another important thing to consider is that `cpu` sampling engine allocates a descriptor per thread.
|
||||
This means, if an application has too many threads and OS limit for the maximum number of open descriptors
|
||||
(`ulimit -n`) is too low, an application may run out of file descriptors. The workaround
|
||||
is to simply increase file descriptor limit.
|
||||
|
||||
## itimer
|
||||
|
||||
`itimer` mode is based on [setitimer(ITIMER_PROF)](https://man7.org/linux/man-pages/man2/setitimer.2.html)
|
||||
syscall, which ideally generates a signal every given interval of CPU time consumed by the process.
|
||||
Ideally, both `itimer` and `cpu` should collect the same number of samples. Typically,
|
||||
profiles indeed look very similar. However, in [some cases](https://github.com/golang/go/issues/14434),
|
||||
`cpu` profile appears more accurate, since a signal is delivered exactly to the thread
|
||||
that overflowed a hardware counter. In contrast, `itimer` has the following limitations:
|
||||
|
||||
- Only one `itimer` signal can be delivered to a process at a time.
|
||||
- Signals are not distributed evenly between running threads.
|
||||
- Sampling resolution is limited by the size of [jiffies](https://man7.org/linux/man-pages/man7/time.7.html).
|
||||
|
||||
`itimer` profiles may be even less accurate on macOS, where `itimer` signals are often biased
|
||||
towards system calls.
|
||||
|
||||
The main advantage of `itimer` is that it works in containers and does not consume file descriptors.
|
||||
|
||||
## ctimer
|
||||
|
||||
`ctimer` is a Linux-specific alternative for `cpu` profiling mode to overcome limitations
|
||||
of `perf_events`, such as the `perf_event_paranoid` setting, seccomp restriction or a low limit
|
||||
for the number of open file descriptors. `ctimer` mode relies on
|
||||
[timer_create](https://man7.org/linux/man-pages/man2/timer_create.2.html) API.
|
||||
It combines benefits of `-e cpu` and `-e itimer`, except that it does not allow collecting kernel stacks.
|
||||
|
||||
Like with `itimer`, `ctimer` resolution is limited by the size of the jiffy -
|
||||
kernel `HZ` constant, which is typically equal to 100 or 250, meaning that the minimum supported
|
||||
profiling interval is 10ms or 4ms respectively.
|
||||
|
||||
## Summary
|
||||
|
||||
Here is a summary of advantages and drawbacks of all CPU profiling engines:
|
||||
|
||||
| Attribute | cpu (perf_events) | itimer | ctimer |
|
||||
| --------------------------------- | :---------------: | :----: | :----: |
|
||||
| Can collect kernel stack traces | ✅ | ❌ | ❌ |
|
||||
| High resolution | ✅ | ❌ | ❌ |
|
||||
| Accuracy / fairness | ✅ | ❌ | 🆗 |
|
||||
| Works in containers by default | ❌ | ✅ | ✅ |
|
||||
| Does not consume file descriptors | ❌ | ✅ | ✅ |
|
||||
| macOS support | ❌ | ✅ | ❌ |
|
||||
|
||||
When using `-e cpu` on Linux, async-profiler automatically checks for `perf_events` availability
|
||||
by trying to create a dummy perf_event. If kernel-space profiling is not available,
|
||||
async-profiler transparently falls back to `ctimer` mode. To force using `perf_events`
|
||||
for user-space only profiling, specify `-e cpu-clock --all-user` instead of `-e cpu`.
|
||||
|
||||
The actual profiling engine (`perf_events`, `ctimer`, etc.) is now recorded in `jfr` output.
|
||||
85
docs/FlamegraphInterpretation.md
Normal file
@@ -0,0 +1,85 @@
|
||||
# FlameGraph interpretation
|
||||
|
||||
To interpret a flame graph, the best way forward is to understand how it is created.
|
||||
|
||||
## Example application to profile
|
||||
|
||||
Let's take the below example:
|
||||
|
||||
```
|
||||
main() {
|
||||
// some business logic
|
||||
func3() {
|
||||
// some business logic
|
||||
func7();
|
||||
}
|
||||
|
||||
// some business logic
|
||||
func4();
|
||||
|
||||
// some business logic
|
||||
func1() {
|
||||
// some business logic
|
||||
func5();
|
||||
}
|
||||
|
||||
// some business logic
|
||||
func2() {
|
||||
// some business logic
|
||||
func6() {
|
||||
// some business logic
|
||||
func8(); // cpu intensive work here
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Profiler sampling
|
||||
|
||||
Profiling starts by taking samples `X` times per second. Whenever a sample is taken,
|
||||
the current call stack for it is saved. The diagram below shows the unsorted sampling view
|
||||
before the sorting and aggregation takes place.
|
||||
|
||||

|
||||
|
||||
Below are the sampling numbers:
|
||||
|
||||
- `func3()->func7()`: 3 samples
|
||||
- `func4()`: 1 sample
|
||||
- `func1()->func5()`: 2 samples
|
||||
- `func2()->func8()`: 4 samples
|
||||
- `func2()->func6()`: 1 sample
|
||||
|
||||
## Sorting samples
|
||||
|
||||
Samples are then alphabetically sorted at the base level just after root (or main method) of the application.
|
||||
|
||||

|
||||
|
||||
Note that X-axis is no longer a timeline. Flame graph does not preserve information
|
||||
on _when_ a particular stack trace was taken, it only indicates _how often_
|
||||
a stack trace was observed during profiling.
|
||||
|
||||
## Aggregated view
|
||||
|
||||
The blocks for the same functions at each level of stack depth are then stitched together
|
||||
to get an aggregated view of the flame graph.
|
||||

|
||||
|
||||
In this example, except `func4()`, no other function actually consumes
|
||||
any resource at the base level of stack depth. `func5()`, `func6()`,
|
||||
`func7()` and `func8()` are the ones consuming resources, with `func8()`
|
||||
being a likely candidate for performance optimization.
|
||||
|
||||
CPU utilization is the most common use case for flame graphs, however,
|
||||
there are other modes of profiling like allocation profiling to view
|
||||
heap utilization and wall-clock profiling to view latency.
|
||||
|
||||
[More on various modes of profiling](ProfilingModes.md)
|
||||
|
||||
## Understanding FlameGraph colors
|
||||
|
||||
Color is another flame graph dimension that may be used to encode additional information
|
||||
about each frame. Colors may have different meaning in various flame graph implementations.
|
||||
async-profiler uses the following palette to differentiate frame types:
|
||||
|
||||

|
||||
110
docs/GettingStarted.md
Normal file
@@ -0,0 +1,110 @@
|
||||
# Getting started guide
|
||||
|
||||
## Before profiling
|
||||
|
||||
As of Linux 4.6, capturing kernel call stacks using `perf_events` from a non-root
|
||||
process requires setting two kernel parameters. You can set them using sysctl as follows:
|
||||
|
||||
```
|
||||
# sysctl kernel.perf_event_paranoid=1
|
||||
# sysctl kernel.kptr_restrict=0
|
||||
```
|
||||
|
||||
For better profiling accuracy, it is [recommended](Troubleshooting.md#known-limitations)
|
||||
to start the JVM with `-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints` flags,
|
||||
unless async-profiler is loaded at JVM startup.
|
||||
|
||||
## Find a process to profile
|
||||
|
||||
Common ways to find the target process include using
|
||||
[`jps`](https://docs.oracle.com/en/java/javase/21/docs/specs/man/jps.html) and
|
||||
[`pgrep`](https://man7.org/linux/man-pages/man1/pgrep.1.html).
|
||||
For example, to list all Java process IDs with their full command lines, run
|
||||
`pgrep -a java`. The next section includes an example using `jps`.
|
||||
|
||||
## Start profiling
|
||||
|
||||
async-profiler works in the context of the target Java application,
|
||||
i.e. it runs as an agent in the process being profiled.
|
||||
`asprof` is a tool to attach and control the agent.
|
||||
|
||||
A typical workflow would be to launch your Java application, attach
|
||||
the agent and start profiling, exercise your performance scenario, and
|
||||
then stop profiling. The agent's output, including the profiling results, will
|
||||
be displayed on the console where you've started `asprof`.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof start 8983
|
||||
$ asprof stop 8983
|
||||
```
|
||||
|
||||
The following may be used in lieu of the `pid` (8983):
|
||||
|
||||
- The keyword `jps`, which will find `pid` automatically, if there is a single Java process running in the system.
|
||||
- The application name as it appears in the `jps` output: e.g. `Computey`
|
||||
|
||||
Alternatively, you may specify `-d` (duration) argument to profile
|
||||
the application for a fixed period of time with a single command.
|
||||
|
||||
```
|
||||
$ asprof -d 30 8983
|
||||
```
|
||||
|
||||
By default, the profiling frequency is 100Hz (every 10ms of CPU time).
|
||||
Here is a sample output of `asprof`:
|
||||
|
||||
```
|
||||
--- Execution profile ---
|
||||
Total samples: 687
|
||||
Unknown (native): 1 (0.15%)
|
||||
|
||||
--- 6790000000 (98.84%) ns, 679 samples
|
||||
[ 0] Primes.isPrime
|
||||
[ 1] Primes.primesThread
|
||||
[ 2] Primes.access$000
|
||||
[ 3] Primes$1.run
|
||||
[ 4] java.lang.Thread.run
|
||||
|
||||
... a lot of output omitted for brevity ...
|
||||
|
||||
ns percent samples top
|
||||
---------- ------- ------- ---
|
||||
6790000000 98.84% 679 Primes.isPrime
|
||||
40000000 0.58% 4 __do_softirq
|
||||
|
||||
... more output omitted ...
|
||||
```
|
||||
|
||||
This indicates that the hottest method was `Primes.isPrime`, and the hottest
|
||||
call stack leading to it comes from `Primes.primesThread`.
|
||||
|
||||
## Other use cases
|
||||
|
||||
- [Launching as an agent](IntegratingAsyncProfiler.md#launching-as-an-agent)
|
||||
- [Java API](IntegratingAsyncProfiler.md#using-java-api)
|
||||
- [IntelliJ IDEA](IntegratingAsyncProfiler.md#intellij-idea)
|
||||
|
||||
## FlameGraph visualization
|
||||
|
||||
async-profiler provides out-of-the-box [Flame Graph](https://www.brendangregg.com/flamegraphs.html) support.
|
||||
Specify `-o flamegraph` argument to dump profiling results as an interactive HTML Flame Graph.
|
||||
Also, Flame Graph output format will be chosen automatically if the target filename ends with `.html`.
|
||||
|
||||
```
|
||||
$ jps
|
||||
9234 Jps
|
||||
8983 Computey
|
||||
$ asprof -d 30 -f /tmp/flamegraph.html 8983
|
||||
```
|
||||
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
The flame graph html can be opened in any browser of your choice for further interpretation.
|
||||
|
||||
Please refer to [Interpreting a Flame Graph](FlamegraphInterpretation.md)
|
||||
to understand more on how to interpret a Flame Graph.
|
||||
94
docs/Heatmap.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Heatmap
|
||||
|
||||
Problems to be solved with a profiler can be divided into two large categories:
|
||||
|
||||
1. Optimization of overall resource usage.
|
||||
2. Troubleshooting of intermittent performance issues.
|
||||
|
||||
While flame graphs are handy for the first type of problems, they are not very helpful
|
||||
for analyzing transient anomalies because they provide an aggregated view that lacks
|
||||
any timeline information. To address the second type of problems, async-profiler offers
|
||||
a converter from JFR format to an interactive heatmap in the form of a single-page HTML file.
|
||||
|
||||
Heatmap is an alternative representation of profile data that preserves timestamps
|
||||
of particular samples. Essentially, it's a two-dimensional timeline composed of
|
||||
colored blocks. Each block represents a short period of time (usually in the range of
|
||||
milliseconds to seconds) with its color being the third dimension: the more intense
|
||||
the color, the more events happened in a given period of time.
|
||||
|
||||

|
||||
|
||||
The idea of heatmaps was borrowed from [FlameScope](https://github.com/Netflix/flamescope),
|
||||
however, FlameScope targets short profiling intervals up to a few minutes, whereas
|
||||
async-profiler implementation is capable of visualizing 24-hour recordings
|
||||
with the granularity of 20 milliseconds. Moreover, heatmaps produced by async-profiler
|
||||
are serverless: they are standalone self-contained HTML files that can be easily shared
|
||||
and viewed without additional software besides a browser.
|
||||
|
||||
## Heatmap features
|
||||
|
||||
### Whole day profile
|
||||
|
||||
Heatmaps are optimized for information density. Full day of continuous profiling
|
||||
can be presented on a single image, where an engineer can spot regular activity
|
||||
patterns as well as anomalies at a glance.
|
||||
|
||||
Heatmaps are also optimized for footprint. Specialized compression algorithms
|
||||
can pack 1 GB original JFR recording to an HTML page of 10-15 MB in size.
|
||||
|
||||

|
||||
|
||||
### Scale / zoom
|
||||
|
||||
Depending on the recording duration and level of detail you are interested in,
|
||||
you can switch between 3 available scales. On the largest scale, each vertical line
|
||||
represents 5 minutes of wall clock time, with each square corresponding to
|
||||
a 5-second interval. On the finest scale, each square corresponds to 20 milliseconds,
|
||||
allowing you to analyze profiling samples with a high resolution.
|
||||
|
||||

|
||||
|
||||
### Instant flame graphs
|
||||
|
||||
A click on any heatmap square displays a flame graph for this specific time interval.
|
||||
|
||||

|
||||
|
||||
Hold the mouse button to select an arbitrary time range on a heatmap.
|
||||
A flame graph for the given time range will be built automatically.
|
||||
|
||||

|
||||
|
||||
### Compare time ranges
|
||||
|
||||
Select target time range as described above. Holding `Ctrl` key,
|
||||
move mouse pointer to choose another time range that will serve as a baseline.
|
||||
You will then get a differential flame graph highlighting stacks
|
||||
that were seen more often in the target time range compared to the baseline.
|
||||
|
||||

|
||||
|
||||
### Search
|
||||
|
||||
Press `Ctrl+F` and enter a regex to search on the entire heatmap.
|
||||
Time intervals containing matched stacks will be highlighted on a heatmap in blue.
|
||||
Matching frames, if any, will be also highlighted on a flame graph.
|
||||
|
||||
`Ctrl+Shift+F` does the same, except that a flame graph will
|
||||
retain stacks with matching frames only. All other stacks will be filtered out.
|
||||
|
||||

|
||||
|
||||
## Producing heatmaps
|
||||
|
||||
Heatmaps can only be generated from recordings in JFR format.
|
||||
Run [`jfrconv`](ConverterUsage.md) tool with `-o heatmap` option.
|
||||
|
||||
Standard `jfrconv` options (`--cpu`, `--alloc`, `--from`/`--to`, `--simple`, etc.)
|
||||
are also applicable to heatmaps.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
jfrconv --cpu -o heatmap profiler.jfr heatmap-cpu.html
|
||||
```
|
||||
65
docs/IntegratingAsyncProfiler.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Integrating async-profiler
|
||||
|
||||
## Launching as an agent
|
||||
|
||||
If you need to profile some code as soon as the JVM starts up, instead of using `asprof`,
|
||||
it is possible to attach async-profiler as an agent on the command line. For example:
|
||||
|
||||
```
|
||||
$ java -agentpath:/path/to/libasyncProfiler.so=start,event=cpu,file=profile.html ...
|
||||
```
|
||||
|
||||
On macOS, the library name is `libasyncProfiler.dylib` instead of `libasyncProfiler.so`.
|
||||
|
||||
Agent library is configured through the JVMTI argument interface.
|
||||
The argument string is a comma-separated list of [profiler options](ProfilerOptions.md):
|
||||
|
||||
```
|
||||
option[=value],option[=value]...
|
||||
```
|
||||
|
||||
`asprof` internally converts command line arguments to the above format and attaches
|
||||
`libasyncProfiler.so` agent to a running process.
|
||||
|
||||
Another important use of attaching async-profiler as an agent is for continuous profiling.
|
||||
|
||||
## Using Java API
|
||||
|
||||
async-profiler can be controlled programmatically using Java API. The corresponding Java library
|
||||
is published to Maven Central. You can [include it](https://mvnrepository.com/artifact/tools.profiler/async-profiler/latest)
|
||||
just like any other Maven dependency:
|
||||
|
||||
```
|
||||
<dependency>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>async-profiler</artifactId>
|
||||
<version>X.Y</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
### Example usage with the API
|
||||
|
||||
```
|
||||
AsyncProfiler profiler = AsyncProfiler.getInstance();
|
||||
```
|
||||
|
||||
The above gives us an instance of `AsyncProfiler` object which can be further used to start
|
||||
actual profiling.
|
||||
|
||||
```
|
||||
profiler.execute("start,jfr,event=cpu,file=/path/to/%p.jfr");
|
||||
// do some meaningful work
|
||||
profiler.execute("stop");
|
||||
```
|
||||
|
||||
`%p` equates to the PID of the process. Filename may include other placeholders which
|
||||
can be found in [Profiler Options](ProfilerOptions.md).
|
||||
`file` should be specified only once, either in
|
||||
`start` command with `jfr` output or in `stop` command with any other format.
|
||||
|
||||
## IntelliJ IDEA
|
||||
|
||||
IntelliJ IDEA comes bundled with async-profiler, which can be further configured to your needs
|
||||
by selecting the `Java Profiler` menu option at `Settings/Preferences > Build, Execution, Deployment`.
|
||||
Agent options can be modified for the specific use cases and also `Collect native calls` can be checked
|
||||
to monitor non-Java threads and native frames in Java stack traces.
|
||||
41
docs/JfrVisualization.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# JFR Visualization
|
||||
|
||||
JFR recordings produced by async-profiler can be viewed using multiple options explained below.
|
||||
|
||||
## Built-in converter
|
||||
|
||||
async-profiler provides a built-in converter `jfrconv` which can be used to convert `jfr` output
|
||||
to a flame graph or one of the other supported formats. More details on the built-in converter usage
|
||||
can be found [here](ConverterUsage.md).
|
||||
|
||||
## JMC
|
||||
|
||||
[JDK Mission Control](https://www.oracle.com/java/technologies/jdk-mission-control.html) (JMC)
|
||||
is a popular GUI tool to analyze JFR recordings.
|
||||
It was originally developed to work in conjunction with the JDK Flight Recorder,
|
||||
however, async-profiler recordings are also fully compatible with JMC.
|
||||
|
||||
When viewing async-profiler recordings in JMC, information on some tabs may be missing.
|
||||
Developers are typically interested in the following sections:
|
||||
|
||||
- Java Application
|
||||
- Method Profiling
|
||||
- Memory
|
||||
- Lock Instances
|
||||
- JVM Internals
|
||||
- TLAB Allocations
|
||||
|
||||
## IntelliJ IDEA
|
||||
|
||||
IntelliJ IDEA Ultimate has a built-in JFR viewer that works perfectly with async-profiler recordings.
|
||||
For the Community Edition, there is an open-source profiler [plugin](https://plugins.jetbrains.com/plugin/20937-java-jfr-profiler)
|
||||
that allows you to profile Java applications with JFR and async-profiler as well as
|
||||
open JFR files obtained outside the IDE.
|
||||
|
||||
## JFR command line tool
|
||||
|
||||
JDK distributions include the `jfr` command line utility to filter, summarize and output
|
||||
flight recording files into human-readable format. The
|
||||
[official documentation](https://docs.oracle.com/en/java/javase/21/docs/specs/man/jfr.html)
|
||||
provides complete information on how to manipulate the contents and translate it as per
|
||||
developers' needs to debug performance issues with their Java applications.
|
||||
63
docs/OutputFormats.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Output Formats
|
||||
|
||||
async-profiler currently supports the following output formats:
|
||||
|
||||
- `collapsed` - This is a collection of call stacks, where each line is a semicolon separated list of frames followed
|
||||
by a counter. This is used by the FlameGraph script to generate the FlameGraph visualization of the profile data.
|
||||
|
||||
```
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult 21
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeInt 1
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeInt;java/io/ByteArrayOutputStream.write 5
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF 12
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF;java/lang/String.length 3
|
||||
FileConverter.main;FileConverter.convertFile;FileConverter.saveResult;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.writeUTF;java/io/DataOutputStream.write 6
|
||||
start_thread;thread_native_entry;Thread::call_run;VMThread::run;VMThread::inner_execute;VMThread::evaluate_operation;VM_Operation::evaluate;VM_GenCollectForAllocation::doit;GenCollectedHeap::satisfy_failed_allocation;GenCollectedHeap::do_collection;GenCollectedHeap::collect_generation;DefNewGeneration::collect;DefNewGeneration::FastEvacuateFollowersClosure::do_void 12
|
||||
start_thread;thread_native_entry;Thread::call_run;VMThread::run;VMThread::inner_execute;VMThread::evaluate_operation;VM_Operation::evaluate;VM_GenCollectForAllocation::doit;GenCollectedHeap::satisfy_failed_allocation;GenCollectedHeap::do_collection;GenCollectedHeap::collect_generation;DefNewGeneration::collect;DefNewGeneration::FastEvacuateFollowersClosure::do_void;void ContiguousSpace::oop_since_save_marks_iterate<DefNewScanClosure> 1
|
||||
```
|
||||
|
||||
- `flamegraph` - FlameGraph is a hierarchical representation of call traces of the profiled software in a color coded
|
||||
format. Read more on the [interpretation](FlamegraphInterpretation.md) of FlameGraphs.
|
||||
[](https://htmlpreview.github.io/?https://github.com/async-profiler/async-profiler/blob/master/.assets/html/flamegraph.html)
|
||||
|
||||
- `tree` - Profile output generated in HTML format showing a tree view of resource usage beginning with the call stack
|
||||
with the highest resource usage and then showing other call stacks in descending order of resource usage. Expanding a
|
||||
parent frame follows the same hierarchical representation within that frame.
|
||||

|
||||
|
||||
- `text` - If no output format is specified with `-o` and filename has no extension provided, profiled output is
|
||||
generated in text format.
|
||||
|
||||
```
|
||||
--- Execution profile ---
|
||||
Total samples : 733
|
||||
|
||||
--- 8208 bytes (19.58%), 1 sample
|
||||
[ 0] byte[]
|
||||
[ 1] java.util.jar.Manifest$FastInputStream.<init>
|
||||
[ 2] java.util.jar.Manifest$FastInputStream.<init>
|
||||
[ 3] java.util.jar.Manifest.read
|
||||
[ 4] java.util.jar.Manifest.<init>
|
||||
[ 5] java.util.jar.Manifest.<init>
|
||||
[ 6] java.util.jar.JarFile.getManifestFromReference
|
||||
[ 7] java.util.jar.JarFile.getManifest
|
||||
[ 8] jdk.internal.loader.URLClassPath$JarLoader$2.getManifest
|
||||
[ 9] jdk.internal.loader.BuiltinClassLoader.defineClass
|
||||
[10] jdk.internal.loader.BuiltinClassLoader.findClassOnClassPathOrNull
|
||||
[11] jdk.internal.loader.BuiltinClassLoader.loadClassOrNull
|
||||
[12] jdk.internal.loader.BuiltinClassLoader.loadClass
|
||||
[13] jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass
|
||||
[14] java.lang.ClassLoader.loadClass
|
||||
[15] java.lang.Class.forName0
|
||||
[16] java.lang.Class.forName
|
||||
[17] sun.launcher.LauncherHelper.loadMainClass
|
||||
[18] sun.launcher.LauncherHelper.checkAndLoadMain
|
||||
```
|
||||
|
||||
- `jfr` - profile format used by the JDK Flight Recorder. The `jfr` format collects data
|
||||
about the JVM as well as the Java application running on it. async-profiler can generate output in `jfr` format
|
||||
compatible with tools capable of viewing and analyzing `jfr` files. JDK Mission Control (JMC) and IntelliJ IDEA are
|
||||
some of many options to visualize `jfr` files. More details [here](JfrVisualization.md).
|
||||
|
||||
- `otlp` - OpenTelemetry protocol format for [profiling data](https://opentelemetry.io/blog/2024/profiling).
|
||||
Experimental feature: backward-incompatible changes may happen in future releases of async-profiler.
|
||||
130
docs/ProfilerOptions.md
Normal file
@@ -0,0 +1,130 @@
|
||||
# Profiler options
|
||||
|
||||
The below tables list the profiler options available with `asprof` and also when
|
||||
[launching as an agent](IntegratingAsyncProfiler.md#launching-as-an-agent).
|
||||
Some tables are output specific, which means some options are applicable to only one or more output formats but not all.
|
||||
|
||||
```
|
||||
Usage: asprof [action] [options] [PID]
|
||||
```
|
||||
|
||||
## Actions
|
||||
|
||||
The below options are `action`s for async-profiler and common for both `asprof` binary and when launching as an agent.
|
||||
|
||||
| Option | Description |
|
||||
| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `start` | Start profiling in semi-automatic mode, i.e. profiler will run until `stop` command is explicitly called. |
|
||||
| `resume` | Start or resume earlier profiling session that has been stopped. All the collected data remains valid. The profiling options are not preserved between sessions, and should be specified again. |
|
||||
| `stop` | Stop profiling and print the report. |
|
||||
| `dump` | Dump collected data without stopping profiling session. |
|
||||
| `status` | Print profiling status: whether profiler is active and for how long. |
|
||||
| `metrics` | Print profiler metrics in Prometheus format. |
|
||||
| `list` | Show the list of profiling events available for the target process specified with PID. |
|
||||
|
||||
## General options
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| -------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-o fmt` | `fmt` | Specifies what information to dump when profiling ends. For various dump option details, please refer to [Dump Option Appendix](#dump-option). |
|
||||
| `-f FILENAME` | `file=FILENAME` | The file name to dump the profile information to.<br>`%p` in the file name is expanded to the PID of the target JVM;<br>`%t` - to the timestamp;<br>`%n{MAX}` - to the sequence number;<br>`%{ENV}` - to the value of the given environment variable.<br>Example: `asprof -o collapsed -f /tmp/traces-%t.txt 8983` |
|
||||
| `-d N` | N/A | asprof-only option designed for interactive use. It is a shortcut for running 3 actions: start, sleep for N seconds, stop. If no `start`, `resume`, `stop` or `status` option is given, the profiler will run for the specified period of time and then automatically stop.<br>Example: `asprof -d 30 <pid>` |
|
||||
| `--timeout N` | `timeout=N` | The profiling duration, in seconds. The profiler will run for the specified period of time and then automatically stop.<br>Example: `java -agentpath:/path/to/libasyncProfiler.so=start,event=cpu,timeout=30,file=profile.html <application>` |
|
||||
| `--loop TIME` | `loop=TIME` | Run profiler in a loop (continuous profiling). The argument is either a clock time (`hh:mm:ss`) or a loop duration in `s`econds, `m`inutes, `h`ours, or `d`ays. Make sure the filename includes a timestamp pattern, or the output will be overwritten on each iteration.<br>Example: `asprof --loop 1h -f /var/log/profile-%t.jfr 8983` |
|
||||
| `-e --event EVENT` | `event=EVENT` | The profiling event: `cpu`, `alloc`, `nativemem`, `lock`, `cache-misses` etc. Use `list` to see the complete list of available events.<br>Please refer to [Profiling Modes](ProfilingModes.md) for additional information. |
|
||||
| `-i --interval N` | `interval=N` | Interval has different meaning depending on the event. For CPU profiling, it's CPU time in nanoseconds. In wall clock mode, it's wall clock time. For Java method profiling or native function profiling, it's number of calls. For PMU profiling, it's number of events. Time intervals may be followed by `s` for seconds, `ms` for milliseconds, `us` for microseconds or `ns` for nanoseconds.<br>Example: `asprof -e cpu -i 5ms 8983` |
|
||||
| `--alloc N` | `alloc=N` | Allocation profiling interval in bytes or in other units, if N is followed by `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). |
|
||||
| `--tlab` | `tlab` | Use TLAB events for allocation profiling |
|
||||
| `--live` | `live` | Retain allocation samples with live objects only (object that have not been collected by the end of profiling session). Useful for finding Java heap memory leaks. |
|
||||
| `--nativemem N` | `nativemem=N` | Native memory allocation profiling. N, if specified, is the interval in bytes or in other units, if N is followed by `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). Default N is 0. |
|
||||
| `--nofree` | `nofree` | Will not record free calls in native memory allocation profiling. This is relevant when tracking memory leaks is not important and there are lots of free calls. |
|
||||
| `--trace METHOD[:T]` | `trace=METHOD[:T]` | Java method to be traced, optionally followed by a latency threshold.<br>Example: `--trace my.pkg.Class.Method:50ms`.<br>Latency threshold defaults to 0 (all calls are profiled). Can be used multiple times. |
|
||||
| `--lock TIME` | `lock=TIME` | In lock profiling mode, sample contended locks whenever total lock wait time overflows the specified threshold. |
|
||||
| `--nativelock TIME` | `nativelock=TIME` | In native lock profiling mode, sample contended pthread locks (mutex/rwlock) whenever total lock wait time overflows the specified threshold. |
|
||||
| `--wall INTERVAL` | `wall=INTERVAL` | Wall clock profiling interval. Use this option instead of `-e wall` to enable wall clock profiling with another event, typically `cpu`.<br>Example: `asprof -e cpu --wall 100ms -f combined.jfr 8983`. |
|
||||
| `--nobatch` | `nobatch` | Disable wall clock profiling optimization. Async-profiler will emit one `jdk.ExecutionSample` event for each wall clock sample instead of batching them in a custom `profiler.WallClockSample` event. |
|
||||
| `-j N` | `jstackdepth=N` | Sets the maximum stack depth. The default is 2048.<br>Example: `asprof -j 30 8983`<br>The argument may include two numbers separated by `/` (e.g. `200/40`). In this case, stack traces deeper than 200 frames will be truncated to the top 40 frames. This can be useful to prevent a deep recursion from bloating the profile. |
|
||||
| `-F features` | `features=LIST` | Comma separated (or `+` separated when launching as an agent) list of stack walking features. Supported features are:<ul><li>`stats` - log stack walking performance stats.</li><li>`vtable` - display targets of megamorphic virtual calls as an extra frame on top of `vtable stub` or `itable stub`.</li><li>`comptask` - display current compilation task (a Java method being compiled) in a JIT compiler stack trace.</li><li>`pcaddr` - display instruction addresses.</li></ul>More details [here](AdvancedStacktraceFeatures.md). |
|
||||
| `-L level` | `loglevel=level` | Log level: `debug`, `info`, `warn`, `error` or `none`. |
|
||||
| N/A | `log=FILENAME` | Dedicated file for log messages. Used internally by asprof. |
|
||||
| N/A | `quiet` | Do not log "Profiling started/stopped" message. Used internally by asprof. |
|
||||
| N/A | `server=ADDRESS` | Start insecure HTTP server with the given IP address/port to control the profiler. This option can be specified as `-agentpath` argument only. Be careful not to expose async-profiler server in a public network. |
|
||||
| `--all-user` | `alluser` | Include only user-mode events. This option is helpful when kernel profiling is restricted by `perf_event_paranoid` settings. |
|
||||
| `--sched` | `sched` | Group threads by Linux-specific scheduling policy: BATCH/IDLE/OTHER. |
|
||||
| `--cstack MODE` | `cstack=MODE` | How to walk native frames (C stack). Possible modes are `fp` (Frame Pointer), `dwarf` (DWARF unwind info), `vm`, `vmx` (HotSpot VM Structs) and `no` (do not collect C stack).<br><br>By default, C stack is shown in cpu, ctimer, wall-clock and perf-events profiles. Java-level events like `alloc` and `lock` collect only Java stack. |
|
||||
| `--signal NUM` | `signal=NUM` | Use alternative signal for cpu or wall clock profiling. To change both signals, specify two numbers separated by a slash: `--signal SIGCPU/SIGWALL`. |
|
||||
| `--clock SOURCE` | `clock=SOURCE` | Clock source for JFR timestamps: `tsc` (default) or `monotonic` (equivalent for `CLOCK_MONOTONIC`). |
|
||||
| `--begin function` | `begin=FUNCTION` | Automatically start profiling when the specified native function is executed. |
|
||||
| `--end function` | `end=FUNCTION` | Automatically stop profiling when the specified native function is executed. |
|
||||
| `--ttsp` | `ttsp` | Time-to-safepoint profiling. An alias for `--begin SafepointSynchronize::begin --end RuntimeService::record_safepoint_synchronized`.<br>It is not a separate event type, but rather a constraint. Whatever event type you choose (e.g. `cpu` or `wall`), the profiler will work as usual, except that only events between the safepoint request and the start of the VM operation will be recorded. |
|
||||
| `--nostop` | `nostop` | Record profiling window between `--begin` and `--end`, but do not stop profiling outside window. |
|
||||
| `--memlimit SIZE` | `memlimit=SIZE` | Limit memory used by the call trace storage. Once the limit is exceeded, no new stack traces will be recorded. The lowest possible limit is 10 MB; the default is unlimited.<br>Example: `asprof -e cpu --memlimit 128m` |
|
||||
| `--libpath PATH` | N/A | Full path to `libasyncProfiler.so` (useful when profiling a container from the host). |
|
||||
| `--filter FILTER` | `filter=FILTER` | In the wall-clock profiling mode, profile only threads with the specified ids.<br>Example: `asprof -e wall -d 30 --filter 120-127,132,134 Computey` |
|
||||
| `--fdtransfer` | `fdtransfer` | Run a background process that provides access to perf_events to an unprivileged process. `--fdtransfer` is useful for profiling a process in a container (which lacks access to perf_events) from the host.<br>See [Profiling Java in a container](ProfilingInContainer.md). |
|
||||
| `--target-cpu` | `target-cpu` | In perf_events profiling mode, instruct the profiler to only sample threads running on the specified CPU, defaults to -1.<br>Example: `asprof --target-cpu 3`. |
|
||||
| `--record-cpu` | `record-cpu` | In perf_events profiling mode, instruct the profiler to capture which CPU a sample was taken on. |
|
||||
| `-v --version` | `version` | Prints the version of profiler library. If PID is specified, gets the version of the library loaded into the given process. |
|
||||
|
||||
## Options applicable to JFR output only
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| ------------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--chunksize N` | `chunksize=N` | Approximate size for a single JFR chunk. A new chunk will be started whenever specified size is reached. The default `chunksize` is 100MB.<br>Example: `asprof -f profile.jfr --chunksize 100m 8983` |
|
||||
| `--chunktime N` | `chunktime=N` | Approximate time limit for a single JFR chunk. A new chunk will be started whenever specified time limit is reached. The default `chunktime` is 1 hour.<br>Example: `asprof -f profile.jfr --chunktime 1h 8983` |
|
||||
| `--jfropts OPTIONS` | `jfropts=OPTIONS` | Comma separated list of JFR recording options. Currently, the only available option is `mem` supported on Linux 3.17+. `mem` enables accumulating events in memory instead of flushing synchronously to a file. |
|
||||
| `--jfrsync CONFIG` | `jfrsync[=CONFIG]` | Start Java Flight Recording with the given configuration synchronously with the profiler. The output .jfr file will include all regular JFR events, except that execution samples will be obtained from async-profiler. This option implies `-o jfr`.<br>`CONFIG` is a predefined JFR profile or a JFR configuration file (.jfc) or a list of JFR events started with `+`.<br>Example: `asprof -e cpu --jfrsync profile -f combined.jfr 8983` |
|
||||
| `--proc INTERVAL` | `proc=INTERVAL` | Collect statistics about other processes in the system. Default sampling interval is 30s. |
|
||||
| `--all` | `all` | Shorthand for enabling `cpu`, `wall`, `alloc`, `live`, `lock`, `nativelock`, `nativemem`, and `proc` profiling simultaneously. This can be combined with `--alloc 2m --lock 10ms` etc. to pass custom interval/threshold. It is also possible to combine it with `-e` argument to change the type of event being collected (default is `cpu`). This is not recommended for production, especially for continuous profiling. |
|
||||
|
||||
## Options applicable to FlameGraph and Tree view outputs only
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| -------------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--title TITLE` | `title=TITLE` | Custom title of a FlameGraph.<br>Example: `asprof -f profile.html --title "Sample CPU profile" 8983` |
|
||||
| `--minwidth PERCENT` | `minwidth=PERCENT` | Minimum frame width as a percentage. Smaller frames will not be visible.<br>Example: `asprof -f profile.html --minwidth 0.5 8983` |
|
||||
| `--reverse` | `reverse` | Reverse stack traces (defaults to icicle graph).<br>Example: `asprof -f profile.html --reverse 8983` |
|
||||
| `--inverted` | `inverted` | Toggles the layout for reversed stacktraces from icicle to flamegraph and for default stacktraces from flamegraph to icicle.<br>Example: `asprof -f profile.html --inverted 8983` |
|
||||
|
||||
Notice that `--reverse` and `--inverted` are orthogonal settings. By default, flamegraphs grow from bottom to top (because flames grow from bottom to top). The outermost frames (e.g. the `main()` function) are shown at the bottom while the innermost, leaf frames are shown at the top. If such a flame graph is mirrored on the y-axis, it becomes an icicle graph (icicles grow top-down). The default setting for this layout can be toggled with the `--inverted` option when the graph is created or changed later with the `Invert` button which is located in the upper-left corner of the generated HTML page, when the graph is displayed.
|
||||
|
||||
By default, async-profiler merges stack traces starting from the outermost (e.g. `main()`) frames and displays them from bottom to top in a flamegraph. The `--reverse` option can be used to create reverse stack traces, i.e. merge them starting with the innermost, leaf frames. By default, reversed stack traces are displayed from top to bottom as icicle graphs. The default layout setting for both, normal and reversed stack traces can be changed with the `--inverted` option.
|
||||
|
||||
## Options applicable to any output format except JFR
|
||||
|
||||
| asprof | Launch as agent | Description |
|
||||
| -------------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-t --threads` | `threads` | Profile threads separately. Each stack trace will end with a frame that denotes a single thread.<br>Example: `asprof -t 8983` |
|
||||
| `-s --simple` | `simple` | Print simple class names instead of fully qualified names. |
|
||||
| `-n --norm` | `norm` | Normalize names of hidden classes / lambdas. |
|
||||
| `-g --sig` | `sig` | Print method signatures. |
|
||||
| `-a --ann` | `ann` | Annotate JIT compiled methods with `_[j]`, inlined methods with `_[i]`, interpreted methods with `_[0]` and C1 compiled methods with `_[1]`. FlameGraph and Tree view will color frames depending on their type regardless of this option. |
|
||||
| `-l --lib` | `lib` | Prepend library names to symbols, e.g. ``libjvm.so`JVM_DefineClassWithSource``. |
|
||||
| `--dot` | `dot` | Dotted class names, e.g. `java.lang.String` instead of `java/lang/String`. |
|
||||
| `--samples` | `samples` | Count the number of samples. This is the default aggregation option. |
|
||||
| `--total` | `total` | Count the total value of the collected metric instead of the number of samples, e.g. total allocation size. |
|
||||
| `-I PATTERN` | `include=PATTERN` | Filter stack traces by the given pattern(s). `-I` defines the name pattern that _must_ be present in the stack traces. `-I` can be specified multiple times. A pattern may begin or end with a star `*` that denotes any (possibly empty) sequence of characters.<br>Example: `asprof -I 'Primes.*' -I 'java/*' 8983` |
|
||||
| `-X PATTERN` | `exclude=PATTERN` | Filter stack traces by the given pattern(s). `-X` defines the name pattern that _must not_ occur in any of stack traces in the output. `-X` can be specified multiple times. A pattern may begin or end with a star `*` that denotes any (possibly empty) sequence of characters.<br>Example: `asprof -X '*Unsafe.park*' 8983` |
|
||||
| N/A | `mcache[=AGE]` | Maximum age of the method name cache. Default is `0` (do not cache method names between profiling sessions). |
|
||||
|
||||
## Appendix
|
||||
|
||||
### Dump Option
|
||||
|
||||
`-o fmt` - specifies what information to dump when profiling ends.
|
||||
`fmt` can be one of the following options:
|
||||
|
||||
- `traces[=N]` - dump call traces (at most N samples);
|
||||
- `flat[=N]` - dump flat profile (top N hot methods);
|
||||
- can be combined with `traces`, e.g. `traces=200,flat=200`
|
||||
- `jfr` - dump events in JDK Flight Recorder format readable by JDK Mission Control.
|
||||
- `collapsed` - dump collapsed call traces in the format used by
|
||||
[FlameGraph](https://github.com/brendangregg/FlameGraph) script. This is
|
||||
a collection of call stacks, where each line is a semicolon separated list
|
||||
of frames followed by a counter.
|
||||
- `flamegraph` - produce Flame Graph in HTML format.
|
||||
- `tree` - produce Call Tree in HTML format.
|
||||
- `--reverse` option will generate backtrace view.
|
||||
- `otlp` - dump events in OpenTelemetry format.
|
||||
|
||||
It is possible to specify multiple dump options at the same time.
|
||||
24
docs/ProfilingInContainer.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Profiling Java in a container
|
||||
|
||||
async-profiler provides the ability to profile Java processes running in a Docker or LXC
|
||||
container both from within a container and from the host system.
|
||||
|
||||
When profiling from the host, `pid` should be the Java process ID in the host
|
||||
namespace. Use `ps aux | grep java` or `docker top <container>` to find
|
||||
the process ID.
|
||||
|
||||
async-profiler should be run from the host by a privileged user - it will
|
||||
automatically switch to the proper pid/mount namespace and change
|
||||
user credentials to match the target process. Also make sure that
|
||||
the target container can access `libasyncProfiler.so` by the same
|
||||
absolute path as on the host. Alternatively, specify `--libpath` option
|
||||
to override path to `libasyncProfiler.so` in a container.
|
||||
|
||||
By default, Docker container restricts the access to `perf_event_open`
|
||||
syscall. There are 3 alternatives to allow profiling in a container:
|
||||
|
||||
1. You can modify the [seccomp profile](https://docs.docker.com/engine/security/seccomp/)
|
||||
or disable it altogether with `--security-opt seccomp=unconfined` option. In
|
||||
addition, `--cap-add SYS_ADMIN` may be required.
|
||||
2. You can use "fdtransfer": see the help for `--fdtransfer`.
|
||||
3. Last, you may fall back to `-e ctimer` profiling mode, see [Troubleshooting](Troubleshooting.md).
|
||||
347
docs/ProfilingModes.md
Normal file
@@ -0,0 +1,347 @@
|
||||
# Profiling modes
|
||||
|
||||
Besides CPU time, async-profiler provides various other profiling modes such as `Allocation`, `Wall Clock`, `Java Method`
|
||||
and even a `Multiple Events` profiling mode.
|
||||
|
||||
## CPU profiling
|
||||
|
||||
In this mode, profiler collects stack trace samples that include **Java** methods,
|
||||
**native** calls, **JVM** code and **kernel** functions.
|
||||
|
||||
The general approach is receiving call stacks generated by `perf_events`
|
||||
and matching them up with call stacks generated by `AsyncGetCallTrace`,
|
||||
in order to produce an accurate profile of both Java and native code.
|
||||
Additionally, async-profiler provides a workaround to recover stack traces
|
||||
in some [corner cases](https://bugs.openjdk.java.net/browse/JDK-8178287)
|
||||
where `AsyncGetCallTrace` fails.
|
||||
|
||||
This approach has the following advantages compared to using `perf_events`
|
||||
directly with a Java agent that translates addresses to Java method names:
|
||||
|
||||
- Does not require `-XX:+PreserveFramePointer`, which introduces
|
||||
performance overhead that can be sometimes as high as 10%.
|
||||
|
||||
- Does not require starting JVM with an agent for translating Java code addresses
|
||||
to method names.
|
||||
|
||||
- Displays interpreter frames.
|
||||
|
||||
- Does not produce large intermediate files (perf.data) for further processing in
|
||||
user space scripts.
|
||||
|
||||
If you wish to resolve frames within `libjvm`, the [debug symbols](#installing-debug-symbols) are required.
|
||||
|
||||
## ALLOCATION profiling
|
||||
|
||||
The profiler can be configured to collect call sites where the largest amount
|
||||
of heap memory is allocated.
|
||||
|
||||
async-profiler does not use intrusive techniques like bytecode instrumentation
|
||||
or expensive DTrace probes which have significant performance impact.
|
||||
It also does not affect Escape Analysis or prevent from JIT optimizations
|
||||
like allocation elimination. Only actual heap allocations are measured.
|
||||
|
||||
The profiler features TLAB-driven sampling. It relies on HotSpot-specific
|
||||
callbacks to receive two kinds of notifications:
|
||||
|
||||
- when an object is allocated in a newly created TLAB;
|
||||
- when an object is allocated on a slow path outside TLAB.
|
||||
|
||||
Sampling interval can be adjusted with `--alloc` option.
|
||||
For example, `--alloc 500k` will take one sample after 500 KB of allocated
|
||||
space on average. Prior to JDK 11, intervals less than TLAB size will not take effect.
|
||||
|
||||
In allocation profiling mode, the top frame of every call trace is the class
|
||||
of the allocated object, and the counter is the heap pressure (the total size
|
||||
of allocated TLABs or objects outside TLAB).
|
||||
|
||||
### Installing Debug Symbols
|
||||
|
||||
Prior to JDK 11, the allocation profiler required HotSpot debug symbols.
|
||||
Some OpenJDK distributions (Amazon Corretto, Liberica JDK, Azul Zulu)
|
||||
already have them embedded in `libjvm.so`, other OpenJDK builds typically
|
||||
provide debug symbols in a separate package. For example, to install
|
||||
OpenJDK debug symbols on Debian / Ubuntu, run:
|
||||
|
||||
```
|
||||
# apt install openjdk-17-dbg
|
||||
```
|
||||
|
||||
(replace `17` with the desired version of JDK).
|
||||
|
||||
On CentOS, RHEL and some other RPM-based distributions, this could be done with
|
||||
[debuginfo-install](http://man7.org/linux/man-pages/man1/debuginfo-install.1.html) utility:
|
||||
|
||||
```
|
||||
# debuginfo-install java-1.8.0-openjdk
|
||||
```
|
||||
|
||||
On Gentoo, the `icedtea` OpenJDK package can be built with the per-package setting
|
||||
`FEATURES="nostrip"` to retain symbols.
|
||||
|
||||
The `gdb` tool can be used to verify if debug symbols are properly installed for the `libjvm` library.
|
||||
For example, on Linux:
|
||||
|
||||
```
|
||||
$ gdb $JAVA_HOME/lib/server/libjvm.so -ex 'info address UseG1GC'
|
||||
```
|
||||
|
||||
This command's output will either contain `Symbol "UseG1GC" is at 0xxxxx`
|
||||
or `No symbol "UseG1GC" in current context`.
|
||||
|
||||
## Native memory leaks
|
||||
|
||||
The profiling mode `nativemem` records `malloc`, `realloc`, `calloc` and `free` calls
|
||||
with the addresses, so that allocations can be matched with frees. This helps to focus
|
||||
the profile report only on unfreed allocations, which are likely to be the source of a memory leak.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
asprof start -e nativemem -f app.jfr <YourApp>
|
||||
# or
|
||||
asprof start --nativemem N -f app.jfr <YourApp>
|
||||
# or if only allocation calls are interesting, do not collect free calls:
|
||||
asprof start --nativemem N --nofree -f app.jfr <YourApp>
|
||||
|
||||
asprof stop <YourApp>
|
||||
```
|
||||
|
||||
Now we need to process the jfr file, to find native memory leaks:
|
||||
|
||||
```
|
||||
# --total for bytes, default counts invocations.
|
||||
jfrconv --total --nativemem --leak app.jfr app-leak.html
|
||||
|
||||
# No leak analysis, include all native allocations:
|
||||
jfrconv --total --nativemem app.jfr app-malloc.html
|
||||
```
|
||||
|
||||
When `--leak` option is used, the generated flame graph will show allocations without matching `free` calls.
|
||||
|
||||

|
||||
|
||||
To avoid bias towards youngest allocations not freed by the end of the profiling session,
|
||||
leak profiler ignores tail allocations made in the last 10% of the profiling period.
|
||||
Tail length can be altered with `--tail` option that accepts `ratio` or `percent%` as an argument.
|
||||
For example, to ignore allocations in the last 2 minutes of a 10 minutes profile, use
|
||||
|
||||
```
|
||||
jfrconv --nativemem --leak --tail 20% app.jfr app-leak.html
|
||||
```
|
||||
|
||||
The overhead of `nativemem` profiling depends on the number of native allocations,
|
||||
but is usually small enough even for production use. If required, the overhead can be reduced
|
||||
by configuring the profiling interval. E.g. if you add `nativemem=1m` profiler option,
|
||||
allocation samples will be limited to at most one sample per allocated megabyte.
|
||||
|
||||
### Using LD_PRELOAD for finding native memory leaks
|
||||
|
||||
Similar to Java applications, `nativemem` mode can be also used with [non-Java processes](ProfilingNonJavaApplications.md).
|
||||
|
||||
Run an application with `nativemem` profiler that dumps recordings in JFR format every 10 minutes:
|
||||
|
||||
```
|
||||
LD_PRELOAD=/path/to/libasyncProfiler.so ASPROF_COMMAND=start,nativemem,total,loop=10m,cstack=dwarf,file=profile-%t.jfr NativeApp [args]
|
||||
```
|
||||
|
||||
Then run `jfrconv` to generate memory leak report as a flame graph:
|
||||
|
||||
```
|
||||
jfrconv --total --nativemem --leak <profile>.jfr <profile>-leak.html
|
||||
```
|
||||
|
||||
## Wall-clock profiling
|
||||
|
||||
`-e wall` option tells async-profiler to sample all threads equally every given
|
||||
period of time regardless of thread status: Running, Sleeping or Blocked.
|
||||
For instance, this can be helpful when profiling application start-up time.
|
||||
|
||||
Wall-clock profiler is most useful in per-thread mode: `-t`.
|
||||
|
||||
Example: `asprof -e wall -t -i 50ms -f result.html 8983`
|
||||
|
||||
## Lock profiling
|
||||
|
||||
`-e lock` option tells async-profiler to measure lock contention in the profiled application. Lock profiling can help
|
||||
developers understand lock acquisition patterns, lock contention (when threads have to wait to acquire locks), time
|
||||
spent waiting for locks and which code paths are blocked due to locks.
|
||||
|
||||
In lock profiling mode, the top frame is the class of lock/monitor, and the counter is number of nanoseconds it took to
|
||||
enter this lock/monitor.
|
||||
|
||||
Example: `asprof -e lock -t -i 5ms -f result.html 8983`
|
||||
|
||||
## Native lock profiling
|
||||
|
||||
`--nativelock` option tells async-profiler to measure pthread lock contention in the profiled application.
|
||||
Native lock profiling can help developers understand pthread lock acquisition patterns, lock contention (when threads
|
||||
have to wait to acquire native locks), time spent waiting for pthread mutexes and read-write locks, and which code paths
|
||||
are blocked due to native synchronization primitives.
|
||||
|
||||
Native lock profiling works by intercepting calls to:
|
||||
|
||||
- [`pthread_mutex_lock`](https://man7.org/linux/man-pages/man3/pthread_mutex_lock.3p.html)
|
||||
- [`pthread_rwlock_rdlock`](https://man7.org/linux/man-pages/man3/pthread_rwlock_rdlock.3p.html)
|
||||
- [`pthread_rwlock_wrlock`](https://man7.org/linux/man-pages/man3/pthread_rwlock_wrlock.3p.html)
|
||||
|
||||
In this mode, the top frame shows the native function that experienced contention (e.g., pthread_mutex_lock_hook),
|
||||
and the counter represents the number of nanoseconds threads spent waiting to acquire the lock.
|
||||
|
||||
Key differences from Java lock profiling:
|
||||
|
||||
- Profiles native pthread locks instead of Java monitors.
|
||||
- Works with C/C++ applications and native libraries used by Java applications.
|
||||
- Captures contention in native code paths that Java lock profiling cannot see.
|
||||
|
||||
Example: `asprof --nativelock 5ms -t -f result.html 8983`
|
||||
|
||||
## Java method profiling
|
||||
|
||||
`-e ClassName.methodName` option instruments the given Java method
|
||||
in order to record all invocations of this method with the stack traces.
|
||||
|
||||
Example: `-e java.util.Properties.getProperty` will profile all places
|
||||
where `getProperty` method is called from.
|
||||
|
||||
Only non-native Java methods are supported. To profile a native method,
|
||||
use hardware breakpoint event instead, e.g. `-e Java_java_lang_Throwable_fillInStackTrace`
|
||||
|
||||
**Be aware** that if you attach async-profiler at runtime, the first instrumentation
|
||||
of a non-native Java method may cause the [deoptimization](https://github.com/openjdk/jdk/blob/bf2e9ee9d321ed289466b2410f12ad10504d01a2/src/hotspot/share/prims/jvmtiRedefineClasses.cpp#L4092-L4096)
|
||||
of all compiled methods. The subsequent instrumentation flushes only the _dependent code_.
|
||||
|
||||
The massive CodeCache flush doesn't occur if attaching async-profiler as an agent.
|
||||
|
||||
### Latency profiling
|
||||
|
||||
Please refer to our blog post on [latency profiling](https://github.com/async-profiler/async-profiler/discussions/1497)
|
||||
to know more about this profiling mode.
|
||||
|
||||
## Native function profiling
|
||||
|
||||
Here are some useful native functions to profile:
|
||||
|
||||
- `G1CollectedHeap::humongous_obj_allocate` - trace _humongous allocations_ of the G1 GC,
|
||||
- `JVM_StartThread` - trace creation of new Java threads,
|
||||
- `Java_java_lang_ClassLoader_defineClass1` - trace class loading.
|
||||
|
||||
## Multiple events
|
||||
|
||||
It is possible to profile CPU, allocations, and locks at the same time.
|
||||
Instead of CPU, you may choose any other execution event: wall-clock,
|
||||
perf event, tracepoint, Java method, etc.
|
||||
|
||||
The only output format that supports multiple events together is JFR.
|
||||
The recording will contain the following event types:
|
||||
|
||||
- `jdk.ExecutionSample`
|
||||
- `jdk.ObjectAllocationInNewTLAB` (alloc)
|
||||
- `jdk.ObjectAllocationOutsideTLAB` (alloc)
|
||||
- `jdk.JavaMonitorEnter` (lock)
|
||||
- `jdk.ThreadPark` (lock)
|
||||
|
||||
To start profiling cpu + allocations + locks together, specify
|
||||
|
||||
```
|
||||
asprof -e cpu,alloc,lock -f profile.jfr ...
|
||||
```
|
||||
|
||||
or use `--alloc` and `--lock` parameters with the desired threshold:
|
||||
|
||||
```
|
||||
asprof -e cpu --alloc 2m --lock 10ms -f profile.jfr ...
|
||||
```
|
||||
|
||||
The same, when starting profiler as an agent:
|
||||
|
||||
```
|
||||
-agentpath:/path/to/libasyncProfiler.so=start,event=cpu,alloc=2m,lock=10ms,file=profile.jfr
|
||||
```
|
||||
|
||||
### Multi-event profiling using `--all`
|
||||
|
||||
The `--all` flag offers a way to simultaneously enable predefined collection of common profiling events. By default, `--all` activates profiling for `cpu`, `wall`, `alloc`, `live`, `lock` and `nativemem`.
|
||||
|
||||
**Important consideration**
|
||||
|
||||
While the `--all` flag can be useful for development environments to get a wide overview, it is not recommended to enable this in production, especially for continuous profiling. Users are invited to select carefully what to profile and with what settings.
|
||||
|
||||
**Sample command:**
|
||||
|
||||
This command enables the default set of events included in `--all`:
|
||||
|
||||
```
|
||||
asprof --all -f profile.jfr
|
||||
```
|
||||
|
||||
or combine it with `--alloc`/`--wall`/`--lock`/`--nativemem` options to override individual settings. For example:
|
||||
|
||||
```
|
||||
asprof --all --alloc 2m --lock 10ms -f profile.jfr
|
||||
```
|
||||
|
||||
The same, when starting profiler as an agent:
|
||||
|
||||
```
|
||||
-agentpath:/path/to/libasyncProfiler.so=start,all,alloc=2m,lock=10ms,file=profile.jfr
|
||||
```
|
||||
|
||||
Instead of `cpu`, it is possible to override the `--all` parameter with any other event type of your choice. For instance, the following command will profile `cycles` along with `wall`, `alloc`, `live`, `lock` and `nativemem`:
|
||||
|
||||
```
|
||||
asprof --all -e cycles -f profile.jfr
|
||||
```
|
||||
|
||||
## Continuous profiling
|
||||
|
||||
Continuous profiling is a means by which an application can be profiled
|
||||
continuously and dump profile results every specified time period.
|
||||
It is a very effective technique in finding performance degradations proactively
|
||||
and efficiently. Continuous profiling helps users to understand performance
|
||||
differences between versions of the same application. Recent outputs can
|
||||
be compared with continuous profiling output history to find differences
|
||||
and optimize the changes introduced in case of performance degradations.
|
||||
async-profiler provides the ability to continuously profile an application with
|
||||
the `loop` option. Make sure the filename includes a timestamp pattern, or the
|
||||
output will be overwritten on each iteration.
|
||||
|
||||
```
|
||||
asprof --loop 1h -f /var/log/profile-%t.jfr 8983
|
||||
```
|
||||
|
||||
## perf event types supported on Linux
|
||||
|
||||
| Usage | Description |
|
||||
| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Predefined: | |
|
||||
| `-e cpu-clock` | High-resolution per-CPU timer. Similar to `-e cpu` but forces using perf_events. |
|
||||
| `-e page-faults` | Software page faults |
|
||||
| `-e context-switches` | Context switches |
|
||||
| `-e cycles` | Total CPU cycles |
|
||||
| `-e ref-cycles` | CPU reference cycles, not affected by CPU frequency scaling |
|
||||
| `-e instructions` | Retired CPU instructions |
|
||||
| `-e cache-references` | Cache accesses (usually Last Level Cache, but may depend on the architecture) |
|
||||
| `-e cache-misses` | Cache accesses requiring fetching data from a higher-level cache or main memory |
|
||||
| `-e branch-instructions` | Retired branch instructions |
|
||||
| `-e branch-misses` | Mispredicted branch instructions |
|
||||
| `-e bus-cycles` | Bus cycles |
|
||||
| `-e L1-dcache-load-misses` | Cache misses on Level 1 Data Cache |
|
||||
| `-e LLC-load-misses` | Cache misses on the Last Level Cache |
|
||||
| `-e dTLB-load-misses` | Data load misses on the Translation Lookaside Buffer |
|
||||
| Breakpoint: | |
|
||||
| `-e mem:<addr>` | Breakpoint on a decimal or hex (0x) address |
|
||||
| `-e mem:<func>` | Breakpoint on a public or a private symbol |
|
||||
| `-e mem:<func>[+<offset>][/<len>][:rwx]` | Breakpoint on a symbol or an address with offset, length and read/write/exec. Address, offset and length can be hex or dec. The format of `mem` event is the same as in [`perf-record`](https://man7.org/linux/man-pages/man1/perf-record.1.html). |
|
||||
| `-e <symbol>` | Equivalent to an execution breakpoint on a symbol: `mem:<symbol>:x`. Example: `-e strcmp` will trace all calls of native `strcmp` function. |
|
||||
| Tracepoint: | |
|
||||
| `-e trace:<id>` | Kernel tracepoint with the given numeric id |
|
||||
| `-e <tracepoint>` | Kernel tracepoint with the specified name. Example: `-e syscalls:sys_enter_open` will trace all `open` syscalls. |
|
||||
| Probes: | |
|
||||
| `-e kprobe:<func>[+<offset>]` | Kernel probe. Example: `-e kprobe:do_sys_open`. |
|
||||
| `-e kretprobe:<func>[+<offset>]` | Kernel return probe. Example: `-e kretprobe:do_sys_open`. |
|
||||
| `-e uprobe:<func>[+<offset>]` | Userspace probe. Example: `-e uprobe:/usr/lib64/libc-2.17.so+0x114790`. |
|
||||
| `-e uretprobe:<func>[+<offset>]` | Userspace return probe |
|
||||
| PMU: | |
|
||||
| `-e r<NNN>` | Architecture-specific PMU event with the given number. Example: `-e r4d2` selects `MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM` event, which corresponds to event 0xd2, umask 0x4. |
|
||||
| `-e <pmu descriptor>` | PMU event descriptor. Example: `-e cpu/cache-misses/`, `-e cpu/event=0xd2,umask=4/`. The same syntax can be used for uncore and vendor-specific events, e.g. `amd_l3/event=0x01,umask=0x80/` |
|
||||
95
docs/ProfilingNonJavaApplications.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# Profiling Non-Java applications
|
||||
|
||||
The scope of profiling non-Java applications is limited to the case when profiler is controlled
|
||||
programmatically from the process being profiled or with `LD_PRELOAD`. It is worth noting that
|
||||
[dynamic attach](IntegratingAsyncProfiler.md#launching-as-an-agent)
|
||||
which is available for Java is not supported for non-Java profiling.
|
||||
|
||||
## LD_PRELOAD
|
||||
|
||||
async-profiler can be injected into a native application through the `LD_PRELOAD` mechanism:
|
||||
|
||||
```
|
||||
LD_PRELOAD=/path/to/libasyncProfiler.so ASPROF_COMMAND=start,event=cpu,file=profile.jfr NativeApp [args]
|
||||
```
|
||||
|
||||
All basic functionality remains the same. Profiler can run in `cpu`, `wall`, `nativemem` and other perf_events
|
||||
modes. Flame Graph and JFR output formats are supported, although JFR files will obviously lack
|
||||
Java-specific events.
|
||||
|
||||
See [Profiling Modes](ProfilingModes.md) for more examples.
|
||||
|
||||
## Controlling async-profiler via the C API
|
||||
|
||||
Similar to the
|
||||
[Java API](IntegratingAsyncProfiler.md#using-java-api),
|
||||
there is a C API for using profiler inside a native application.
|
||||
|
||||
Header file for the API is bundled in the async-profiler release package under [`include/asprof.h`](../src/asprof.h).
|
||||
|
||||
To use it in a C/C++ application, include the mentioned `asprof.h`. Below is an example showing how to invoke async-profiler with the API:
|
||||
|
||||
```
|
||||
#include "asprof.h"
|
||||
#include <dlfcn.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
void test_output_callback(const char* buffer, size_t size) {
|
||||
fwrite(buffer, sizeof(char), size, stderr);
|
||||
}
|
||||
|
||||
int main() {
|
||||
void* lib = dlopen("/path/to/libasyncProfiler.so", RTLD_NOW);
|
||||
if (lib == NULL) {
|
||||
printf("%s\n", dlerror());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
asprof_init_t asprof_init = (asprof_init_t)dlsym(lib, "asprof_init");
|
||||
asprof_execute_t asprof_execute = (asprof_execute_t)dlsym(lib, "asprof_execute");
|
||||
asprof_error_str_t asprof_error_str = (asprof_error_str_t)dlsym(lib, "asprof_error_str");
|
||||
|
||||
if (asprof_init == NULL || asprof_execute == NULL || asprof_error_str == NULL) {
|
||||
printf("%s\n", dlerror());
|
||||
dlclose(lib);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
asprof_init();
|
||||
|
||||
printf("Starting profiler\n");
|
||||
|
||||
char cmd[] = "start,event=cpu,loglevel=debug,file=profile.jfr";
|
||||
asprof_error_t err = asprof_execute(cmd, test_output_callback);
|
||||
if (err != NULL) {
|
||||
fprintf(stderr, "%s\n", asprof_error_str(err));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// ... some meaningful work ...
|
||||
|
||||
printf("Stopping profiler\n");
|
||||
|
||||
err = asprof_execute("stop", test_output_callback);
|
||||
if (err != NULL) {
|
||||
fprintf(stderr, "%s\n", asprof_error_str(err));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
## Unstable APIs
|
||||
|
||||
These APIs are unstable and might change or be removed in the next version of async-profiler.
|
||||
|
||||
### Advanced Sampling
|
||||
|
||||
The `asprof_get_thread_local_data` function returns a pointer to async-profiler's
|
||||
thread-local data structure. The structure is guaranteed to live as long as the thread.
|
||||
|
||||
The returned structure contains a pointer that increments every time there is a sample. This gives
|
||||
native code an easy way to detect when a sample event had occurred, and to log metadata about what the
|
||||
program was doing when the event happened.
|
||||
56
docs/StackWalkingModes.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# Stack Walking Modes
|
||||
|
||||
## Frame Pointer
|
||||
|
||||
`Frame Pointer (FP)` stack walking is a technique for collecting call stacks by tracking frame pointers in memory.
|
||||
Each function call maintains a pointer to its caller's stack frame, creating a linked chain that can be traversed
|
||||
to reconstruct the program's execution path. It's particularly efficient as it is very fast compared to other
|
||||
stack walking methods, introducing less overhead, but it requires code to be compiled with frame
|
||||
pointers enabled (`-fno-omit-frame-pointer`).
|
||||
|
||||
Before async-profiler 4.2, Frame Pointer was the default stack walking mode.
|
||||
Since version 4.2, the default was changed to [VM Structs](#vm-structs).
|
||||
|
||||
## DWARF
|
||||
|
||||
DWARF stack walking is a method to reconstruct call stacks using unwinding information embedded in executables
|
||||
(typically in `.eh_frame` section). Unlike frame-pointer-based unwinding, it works reliably even with optimized code
|
||||
where frame pointers are omitted.
|
||||
|
||||
DWARF unwinding requires extra memory (e.g. the lookup table for `libjvm.so` is about 2MB).
|
||||
It is also slower than the traditional FP-based stack walker, but it's still fast enough for on-the-fly unwinding
|
||||
due to being signal safe in async-profiler.
|
||||
|
||||
The feature can be enabled with the option `--cstack dwarf` (or its agent equivalent `cstack=dwarf`).
|
||||
|
||||
## VM Structs
|
||||
|
||||
async-profiler can leverage JVM internal structures to replicate the logic of Java stack walking
|
||||
in the profiler itself without depending on the unstable JVM API.
|
||||
|
||||
This mode of stack walking has been introduced in async-profiler due to issues with `AsyncGetCallTrace`.
|
||||
AsyncGetCallTrace (AGCT) is a non-standard extension of HotSpot JVM to obtain Java stack traces outside safepoints.
|
||||
async-profiler had been relying on AGCT heavily, and it even got its name after this function.
|
||||
|
||||
`AsyncGetCallTrace` being non-API, was never supported in OpenJDK well enough, it did not receive enough testing, it was
|
||||
broken several times even in minor JDK updates, e.g. [JDK-8307549](https://bugs.openjdk.org/browse/JDK-8307549).
|
||||
|
||||
AsyncGetCallTrace is notorious for its inability to walk Java stack in different corner cases. There is a long-standing
|
||||
bug [JDK-8178287](https://bugs.openjdk.org/browse/JDK-8178287) with several examples. But the worst aspect is that
|
||||
AsyncGetCallTrace can crash JVM, and there is no reliable way to get around this outside the JVM.
|
||||
|
||||
Due to issues with AGCT from time to time, including random crashes and missing stack traces,
|
||||
`vm` stack walking mode based on HotSpot VM Structs was introduced in async-profiler.
|
||||
`vm` stack walker has the following advantages:
|
||||
|
||||
- Fully enclosed by the crash protection based on `setjmp`/`longjmp`.
|
||||
- Can show all frames: Java, native and JVM stubs throughout the whole stack.
|
||||
- Provides additional information on each frame, like JIT compilation type.
|
||||
|
||||
The feature can be enabled with the option `--cstack vm` (or its agent equivalent `cstack=vm`).
|
||||
Since async-profiler 4.2, this is the default mode when running on the HotSpot JVM.
|
||||
|
||||
Another variant of this option: `--cstack vmx` activates an "expert" unwinding based on VM Structs.
|
||||
With this option, async-profiler collects mixed stack traces that have Java and native frames interleaved.
|
||||
|
||||
The maximum stack depth for `vm` or `vmx` stack walking is controlled with `-j depth` option.
|
||||
133
docs/Troubleshooting.md
Normal file
@@ -0,0 +1,133 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Error Messages
|
||||
|
||||
### perf_event mmap failed: Operation not permitted
|
||||
|
||||
Profiler allocates 8 kB perf_event buffer for each thread of the target process.
|
||||
The above error may appear if the total size of perf_event buffers (`8 * threads` kB)
|
||||
exceeds locked memory limit. This limit is comprised of `ulimit -l` plus
|
||||
the value of `kernel.perf_event_mlock_kb` sysctl multiplied by the number of CPU cores.
|
||||
For example, on a 16-core machine, `ulimit -l 65536` and `kernel.perf_event_mlock_kb=516`
|
||||
is enough for profiling `(65536 + 516*16) / 8 = 9224` threads.
|
||||
If an application has more threads, increase one of the above limits, or native stacks
|
||||
will not be collected for some threads.
|
||||
|
||||
A privileged process is not subject to the locked memory limit.
|
||||
|
||||
### Failed to change credentials to match the target process: Operation not permitted
|
||||
|
||||
Due to limitation of HotSpot Dynamic Attach mechanism, the profiler must be run
|
||||
by exactly the same user (and group) as the owner of target JVM process.
|
||||
If profiler is run by a different user, it will try to automatically change
|
||||
current user and group. This will likely succeed for `root`, but not for
|
||||
other users, resulting in the above error.
|
||||
|
||||
### Could not start attach mechanism: No such file or directory
|
||||
|
||||
The profiler cannot establish communication with the target JVM through UNIX domain socket.
|
||||
Usually this happens in one of the following cases:
|
||||
|
||||
1. Attach socket `/tmp/.java_pidNNN` has been deleted. It is a common
|
||||
practice to clean `/tmp` automatically with some scheduled script.
|
||||
Configure the cleanup software to exclude `.java_pid*` files from deletion.
|
||||
|
||||
- How to check: run `lsof -p PID | grep java_pid`. If it lists a socket file, but the file does not exist, then this is exactly
|
||||
the described problem.
|
||||
|
||||
2. JVM is started with `-XX:+DisableAttachMechanism` option.
|
||||
3. `/tmp` directory of Java process is not physically the same directory
|
||||
as `/tmp` of your shell, because Java is running in a container or in
|
||||
`chroot` environment. `asprof` attempts to solve this automatically,
|
||||
but it might lack the required permissions to do so.
|
||||
- Check `strace asprof PID jcmd`
|
||||
4. JVM is busy and cannot reach a safepoint. For instance,
|
||||
JVM is in the middle of long-running garbage collection.
|
||||
- How to check: run `kill -3 PID`. Healthy JVM process should print
|
||||
a thread dump and heap info in its console.
|
||||
|
||||
### Target JVM failed to load libasyncProfiler.so
|
||||
|
||||
The connection with the target JVM has been established, but JVM is unable to load profiler shared library.
|
||||
Make sure the user of JVM process has permissions to access `libasyncProfiler.so` by exactly the same absolute path.
|
||||
For more information see [#78](https://github.com/async-profiler/async-profiler/issues/78).
|
||||
|
||||
### Perf events unavailable
|
||||
|
||||
`perf_event_open()` syscall has failed. Typical reasons include:
|
||||
|
||||
1. `/proc/sys/kernel/perf_event_paranoid` is set to restricted mode (>=2).
|
||||
2. seccomp disables `perf_event_open` API in a container.
|
||||
3. OS runs under a hypervisor that does not virtualize performance counters.
|
||||
4. perf_event_open API is not supported on this system, e.g. WSL.
|
||||
|
||||
<br>For permissions-related reasons (such as 1 and 2), using `--fdtransfer` while running the profiler
|
||||
as a privileged user may solve the issue.
|
||||
|
||||
If changing the configuration is not possible, you may fall back to
|
||||
`-e ctimer` profiling mode. It is similar to `cpu` mode, but does not
|
||||
require perf_events support. As a drawback, there will be no kernel
|
||||
stack traces.
|
||||
|
||||
### No AllocTracer symbols found. Are JDK debug symbols installed?
|
||||
|
||||
The OpenJDK debug symbols are required for allocation profiling for applications developed
|
||||
with JDK prior to 11. See [Installing Debug Symbols](ProfilingModes.md#installing-debug-symbols) for more
|
||||
details. If the error message persists after a successful installation of the debug symbols,
|
||||
it is possible that the JDK was upgraded when installing the debug symbols.
|
||||
In this case, profiling any Java process which had started prior to the installation
|
||||
will continue to display this message, since the process had loaded
|
||||
the older version of the JDK which lacked debug symbols.
|
||||
Restarting the affected Java processes should resolve the issue.
|
||||
|
||||
### VMStructs unavailable. Unsupported JVM?
|
||||
|
||||
JVM shared library does not export `gHotSpotVMStructs*` symbols -
|
||||
apparently this is not a HotSpot JVM. Sometimes the same message
|
||||
can be also caused by an incorrectly built JDK
|
||||
(see [#218](https://github.com/async-profiler/async-profiler/issues/218)).
|
||||
In these cases installing JDK debug symbols may solve the problem.
|
||||
|
||||
### Could not parse symbols from <libname.so>
|
||||
|
||||
Async-profiler was unable to parse non-Java function names because of
|
||||
the corrupted contents in `/proc/[pid]/maps`. The problem is known to
|
||||
occur in a container when running Ubuntu with Linux kernel 5.x.
|
||||
This is the OS bug, see <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1843018>.
|
||||
|
||||
### Could not open output file
|
||||
|
||||
Output file is written by the target JVM process, not by the profiler script.
|
||||
Make sure the path specified in `-f` option is correct and is accessible by the JVM.
|
||||
|
||||
## Known Limitations
|
||||
|
||||
- No Java stacks will be collected if `-XX:MaxJavaStackTraceDepth` is zero
|
||||
or negative. The exception is `--cstack vm` mode, which does not take
|
||||
`MaxJavaStackTraceDepth` into account.
|
||||
|
||||
- Too short profiling interval may cause continuous interruption of heavy
|
||||
system calls like `clone()`, so that it will never complete;
|
||||
see [#97](https://github.com/async-profiler/async-profiler/issues/97).
|
||||
The workaround is simply to increase the interval.
|
||||
|
||||
- When agent is not loaded at JVM startup (by using -agentpath option) it is
|
||||
highly recommended to use `-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints` JVM flags.
|
||||
Without those flags the profiler will still work correctly but results might be
|
||||
less accurate. For example, without `-XX:+DebugNonSafepoints` there is a high chance
|
||||
that simple inlined methods will not appear in the profile. When the agent is attached at runtime,
|
||||
`CompiledMethodLoad` JVMTI event enables debug info, but only for methods compiled after attaching.
|
||||
|
||||
- On most Linux systems, `perf_events` captures call stacks with a maximum depth
|
||||
of 127 frames. On recent Linux kernels, this can be configured using
|
||||
`sysctl kernel.perf_event_max_stack` or by writing to the
|
||||
`/proc/sys/kernel/perf_event_max_stack` file.
|
||||
|
||||
- You will not see the non-Java frames _preceding_ the Java frames on the
|
||||
stack, unless `--cstack vmx` is specified.
|
||||
For example, if `start_thread` called `JavaMain` and then your Java
|
||||
code started running, you will not see the first two frames in the resulting
|
||||
stack. On the other hand, you _will_ see non-Java frames (user and kernel)
|
||||
invoked by your Java code.
|
||||
|
||||
- macOS profiling is limited to user space code only.
|
||||
@@ -3,7 +3,7 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>jfr-converter</artifactId>
|
||||
<version>3.0</version>
|
||||
<version>4.4</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>async-profiler</name>
|
||||
@@ -57,7 +57,7 @@
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<mainClass>Main</mainClass>
|
||||
<mainClass>one.convert.Main</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
</configuration>
|
||||
@@ -102,17 +102,15 @@
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.sonatype.central</groupId>
|
||||
<artifactId>central-publishing-maven-plugin</artifactId>
|
||||
<version>0.8.0</version>
|
||||
<extensions>true</extensions>
|
||||
<configuration>
|
||||
<publishingServerId>central</publishingServerId>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<distributionManagement>
|
||||
<snapshotRepository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
|
||||
</snapshotRepository>
|
||||
<repository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2</url>
|
||||
</repository>
|
||||
</distributionManagement>
|
||||
</project>
|
||||
|
||||
60
pom.xml
@@ -3,7 +3,7 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>tools.profiler</groupId>
|
||||
<artifactId>async-profiler</artifactId>
|
||||
<version>3.0</version>
|
||||
<version>4.4</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>async-profiler</name>
|
||||
@@ -56,19 +56,53 @@
|
||||
<version>3.3.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>linux-x64-jar</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<classifier>${native.platform}</classifier>
|
||||
<classifier>linux-x64</classifier>
|
||||
<includes>
|
||||
<include>${native.platform}/*</include>
|
||||
<include>linux-x64/*</include>
|
||||
<include>one/**</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>linux-arm64-jar</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<classifier>linux-arm64</classifier>
|
||||
<includes>
|
||||
<include>linux-arm64/*</include>
|
||||
<include>one/**</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>macos-jar</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<classifier>macos</classifier>
|
||||
<includes>
|
||||
<include>macos/*</include>
|
||||
<include>one/**</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifestFile>src/api/one/profiler/MANIFEST.MF</manifestFile>
|
||||
</archive>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
@@ -116,17 +150,15 @@
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.sonatype.central</groupId>
|
||||
<artifactId>central-publishing-maven-plugin</artifactId>
|
||||
<version>0.8.0</version>
|
||||
<extensions>true</extensions>
|
||||
<configuration>
|
||||
<publishingServerId>central</publishingServerId>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<distributionManagement>
|
||||
<snapshotRepository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
|
||||
</snapshotRepository>
|
||||
<repository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2</url>
|
||||
</repository>
|
||||
</distributionManagement>
|
||||
</project>
|
||||
|
||||
@@ -18,6 +18,33 @@ u64 AllocTracer::_interval;
|
||||
volatile u64 AllocTracer::_allocated_bytes;
|
||||
|
||||
|
||||
Error AllocTracer::initialize() {
|
||||
if (_in_new_tlab.entry() == 0 || _outside_tlab.entry() == 0) {
|
||||
CodeCache* libjvm = VMStructs::libjvm();
|
||||
const void* ne;
|
||||
const void* oe;
|
||||
|
||||
if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer27send_allocation_in_new_tlab")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer28send_allocation_outside_tlab")) != NULL) {
|
||||
_trap_kind = 1; // JDK 10+
|
||||
} else if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer33send_allocation_in_new_tlab_eventE11KlassHandleP8HeapWord")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer34send_allocation_outside_tlab_eventE11KlassHandleP8HeapWord")) != NULL) {
|
||||
_trap_kind = 1; // JDK 8u262+
|
||||
} else if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer33send_allocation_in_new_tlab_event")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer34send_allocation_outside_tlab_event")) != NULL) {
|
||||
_trap_kind = 2; // JDK 7-9
|
||||
} else {
|
||||
return Error("No AllocTracer symbols found. Are JDK debug symbols installed?");
|
||||
}
|
||||
|
||||
_in_new_tlab.assign(ne);
|
||||
_outside_tlab.assign(oe);
|
||||
_in_new_tlab.pair(_outside_tlab);
|
||||
}
|
||||
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
// Called whenever our breakpoint trap is hit
|
||||
void AllocTracer::trapHandler(int signo, siginfo_t* siginfo, void* ucontext) {
|
||||
StackFrame frame(ucontext);
|
||||
@@ -69,44 +96,15 @@ void AllocTracer::recordAllocation(void* ucontext, EventType event_type, uintptr
|
||||
Profiler::instance()->recordSample(ucontext, total_size, event_type, &event);
|
||||
}
|
||||
|
||||
Error AllocTracer::check(Arguments& args) {
|
||||
if (args._live) {
|
||||
Error AllocTracer::start(Arguments& args) {
|
||||
if (args._live && !args._all) {
|
||||
// This engine is only going to be selected in Profiler::selectAllocEngine
|
||||
// when can_generate_sampled_object_alloc_events is not available, i.e. JDK<11.
|
||||
return Error("'live' option is supported on OpenJDK 11+");
|
||||
}
|
||||
|
||||
if (_in_new_tlab.entry() != 0 && _outside_tlab.entry() != 0) {
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
CodeCache* libjvm = VMStructs::libjvm();
|
||||
const void* ne;
|
||||
const void* oe;
|
||||
|
||||
if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer27send_allocation_in_new_tlab")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer28send_allocation_outside_tlab")) != NULL) {
|
||||
_trap_kind = 1; // JDK 10+
|
||||
} else if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer33send_allocation_in_new_tlab_eventE11KlassHandleP8HeapWord")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer34send_allocation_outside_tlab_eventE11KlassHandleP8HeapWord")) != NULL) {
|
||||
_trap_kind = 1; // JDK 8u262+
|
||||
} else if ((ne = libjvm->findSymbolByPrefix("_ZN11AllocTracer33send_allocation_in_new_tlab_event")) != NULL &&
|
||||
(oe = libjvm->findSymbolByPrefix("_ZN11AllocTracer34send_allocation_outside_tlab_event")) != NULL) {
|
||||
_trap_kind = 2; // JDK 7-9
|
||||
} else {
|
||||
return Error("No AllocTracer symbols found. Are JDK debug symbols installed?");
|
||||
}
|
||||
|
||||
_in_new_tlab.assign(ne);
|
||||
_outside_tlab.assign(oe);
|
||||
_in_new_tlab.pair(_outside_tlab);
|
||||
|
||||
return Error::OK;
|
||||
}
|
||||
|
||||
Error AllocTracer::start(Arguments& args) {
|
||||
Error error = check(args);
|
||||
if (error) {
|
||||
return error;
|
||||
}
|
||||
Error error = initialize();
|
||||
if (error) return error;
|
||||
|
||||
_interval = args._alloc > 0 ? args._alloc : 0;
|
||||
_allocated_bytes = 0;
|
||||
|
||||
@@ -22,10 +22,15 @@ class AllocTracer : public Engine {
|
||||
static u64 _interval;
|
||||
static volatile u64 _allocated_bytes;
|
||||
|
||||
static Error initialize();
|
||||
static void recordAllocation(void* ucontext, EventType event_type, uintptr_t rklass,
|
||||
uintptr_t total_size, uintptr_t instance_size);
|
||||
|
||||
public:
|
||||
const char* type() {
|
||||
return "alloc_tracer";
|
||||
}
|
||||
|
||||
const char* title() {
|
||||
return "Allocation profile";
|
||||
}
|
||||
@@ -34,7 +39,6 @@ class AllocTracer : public Engine {
|
||||
return "bytes";
|
||||
}
|
||||
|
||||
Error check(Arguments& args);
|
||||
Error start(Arguments& args);
|
||||
void stop();
|
||||
|
||||
|
||||
26
src/api/one/profiler/Agent.java
Normal file
@@ -0,0 +1,26 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.profiler;
|
||||
|
||||
import javax.management.ObjectName;
|
||||
import java.lang.management.ManagementFactory;
|
||||
|
||||
public class Agent {
|
||||
|
||||
public static void premain(String args) throws Exception {
|
||||
agentmain(args);
|
||||
}
|
||||
|
||||
public static void agentmain(String args) throws Exception {
|
||||
AsyncProfiler profiler = AsyncProfiler.getInstance();
|
||||
ManagementFactory.getPlatformMBeanServer().registerMBean(
|
||||
profiler,
|
||||
new ObjectName(AsyncProfilerMXBean.OBJECT_NAME));
|
||||
if (args != null && !args.isEmpty()) {
|
||||
profiler.execute(args);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -39,16 +39,22 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
// No need to load library, if it has been preloaded with -agentpath
|
||||
profiler.getVersion();
|
||||
} catch (UnsatisfiedLinkError e) {
|
||||
File file = extractEmbeddedLib();
|
||||
if (file != null) {
|
||||
try {
|
||||
System.load(file.getPath());
|
||||
} finally {
|
||||
file.delete();
|
||||
}
|
||||
String libraryPath = System.getProperty("one.profiler.libraryPath");
|
||||
if (libraryPath != null && !libraryPath.isEmpty()) {
|
||||
System.load(new File(libraryPath).getAbsolutePath());
|
||||
} else {
|
||||
System.loadLibrary("asyncProfiler");
|
||||
File file = extractEmbeddedLib();
|
||||
if (file != null) {
|
||||
try {
|
||||
System.load(file.getAbsolutePath());
|
||||
} finally {
|
||||
file.delete();
|
||||
}
|
||||
} else {
|
||||
System.loadLibrary("asyncProfiler");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,7 +177,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
|
||||
/**
|
||||
* Execute an agent-compatible profiling command -
|
||||
* the comma-separated list of arguments described in arguments.cpp
|
||||
* the comma-separated list of arguments defined in arguments.cpp
|
||||
*
|
||||
* @param command Profiling command
|
||||
* @return The command result
|
||||
@@ -195,7 +201,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
@Override
|
||||
public String dumpCollapsed(Counter counter) {
|
||||
try {
|
||||
return execute0("collapsed," + counter.name().toLowerCase());
|
||||
return execute0("collapsed," + (counter == Counter.SAMPLES ? "samples" : "total"));
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
@@ -231,6 +237,23 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump collected data in OTLP format.
|
||||
* <p>
|
||||
* This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
*
|
||||
* @param counter Which counter to use for aggregation
|
||||
* @return OTLP representation of the profile
|
||||
*/
|
||||
@Override
|
||||
public byte[] dumpOtlp(Counter counter) {
|
||||
try {
|
||||
return execute1("otlp," + (counter == Counter.SAMPLES ? "samples" : "total"));
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add the given thread to the set of profiled threads.
|
||||
* 'filter' option must be enabled to use this method.
|
||||
@@ -271,5 +294,7 @@ public class AsyncProfiler implements AsyncProfilerMXBean {
|
||||
|
||||
private native String execute0(String command) throws IllegalArgumentException, IllegalStateException, IOException;
|
||||
|
||||
private native byte[] execute1(String command) throws IllegalArgumentException, IllegalStateException, IOException;
|
||||
|
||||
private native void filterThread0(Thread thread, boolean enable);
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@ package one.profiler;
|
||||
* }</pre>
|
||||
*/
|
||||
public interface AsyncProfilerMXBean {
|
||||
String OBJECT_NAME = "one.profiler:type=AsyncProfiler";
|
||||
|
||||
void start(String event, long interval) throws IllegalStateException;
|
||||
void resume(String event, long interval) throws IllegalStateException;
|
||||
void stop() throws IllegalStateException;
|
||||
@@ -29,4 +31,5 @@ public interface AsyncProfilerMXBean {
|
||||
String dumpCollapsed(Counter counter);
|
||||
String dumpTraces(int maxTraces);
|
||||
String dumpFlat(int maxMethods);
|
||||
byte[] dumpOtlp(Counter counter);
|
||||
}
|
||||
|
||||
2
src/api/one/profiler/MANIFEST.MF
Normal file
@@ -0,0 +1,2 @@
|
||||
Agent-Class: one.profiler.Agent
|
||||
Premain-Class: one.profiler.Agent
|
||||
80
src/arch.h
@@ -7,28 +7,46 @@
|
||||
#define _ARCH_H
|
||||
|
||||
|
||||
#ifndef likely
|
||||
# define likely(x) (__builtin_expect(!!(x), 1))
|
||||
#endif
|
||||
|
||||
#ifndef unlikely
|
||||
# define unlikely(x) (__builtin_expect(!!(x), 0))
|
||||
#endif
|
||||
|
||||
#ifdef _LP64
|
||||
# define LP64_ONLY(code) code
|
||||
#else // !_LP64
|
||||
# define LP64_ONLY(code)
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
typedef unsigned char u8;
|
||||
typedef unsigned short u16;
|
||||
typedef unsigned int u32;
|
||||
typedef unsigned long long u64;
|
||||
|
||||
static inline u64 atomicInc(volatile u64& var, u64 increment = 1) {
|
||||
return __sync_fetch_and_add(&var, increment);
|
||||
template<typename T>
|
||||
static inline T atomicInc(T& var, T increment = 1) {
|
||||
return __atomic_fetch_add(&var, increment, __ATOMIC_ACQ_REL);
|
||||
}
|
||||
|
||||
static inline int atomicInc(volatile int& var, int increment = 1) {
|
||||
return __sync_fetch_and_add(&var, increment);
|
||||
template<typename T>
|
||||
static inline T atomicDec(T& var, T decrement = 1) {
|
||||
return __atomic_fetch_sub(&var, decrement, __ATOMIC_ACQ_REL);
|
||||
}
|
||||
|
||||
static inline u64 loadAcquire(u64& var) {
|
||||
template<typename T>
|
||||
static inline T loadAcquire(T& var) {
|
||||
return __atomic_load_n(&var, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline void storeRelease(u64& var, u64 value) {
|
||||
return __atomic_store_n(&var, value, __ATOMIC_RELEASE);
|
||||
template<typename T, typename U>
|
||||
static inline void storeRelease(T& var, U value) {
|
||||
__atomic_store_n(&var, static_cast<T>(value), __ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
|
||||
typedef unsigned char instruction_t;
|
||||
@@ -37,7 +55,6 @@ const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = 2;
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 4;
|
||||
const int PLT_HEADER_SIZE = 16;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 8; // PERF_REG_X86_IP
|
||||
@@ -46,6 +63,10 @@ const int PERF_REG_PC = 8; // PERF_REG_X86_IP
|
||||
#define rmb() asm volatile("lfence" : : : "memory")
|
||||
#define flushCache(addr) asm volatile("mfence; clflush (%0); mfence" : : "r" (addr) : "memory")
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() ((void**)__builtin_frame_address(0) + 2)
|
||||
|
||||
#elif defined(__arm__) || defined(__thumb__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
@@ -55,7 +76,6 @@ const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 20;
|
||||
const int PLT_ENTRY_SIZE = 12;
|
||||
const int PERF_REG_PC = 15; // PERF_REG_ARM_PC
|
||||
@@ -64,6 +84,10 @@ const int PERF_REG_PC = 15; // PERF_REG_ARM_PC
|
||||
#define rmb() asm volatile("dmb ish" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(1)
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
@@ -72,7 +96,6 @@ const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 32;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 32; // PERF_REG_ARM64_PC
|
||||
@@ -81,6 +104,10 @@ const int PERF_REG_PC = 32; // PERF_REG_ARM64_PC
|
||||
#define rmb() asm volatile("dmb ish" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerPC() ({ void* pc; asm volatile("adr %0, ." : "=r"(pc)); pc; })
|
||||
#define callerFP() ({ void* fp; asm volatile("mov %0, fp" : "=r"(fp)); fp; })
|
||||
#define callerSP() ({ void* sp; asm volatile("mov %0, sp" : "=r"(sp)); sp; })
|
||||
|
||||
#elif defined(__PPC64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
@@ -91,7 +118,6 @@ const int BREAKPOINT_OFFSET = 8;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 2;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 24;
|
||||
const int PLT_ENTRY_SIZE = 24;
|
||||
const int PERF_REG_PC = 32; // PERF_REG_POWERPC_NIP
|
||||
@@ -100,6 +126,10 @@ const int PERF_REG_PC = 32; // PERF_REG_POWERPC_NIP
|
||||
#define rmb() asm volatile ("sync" : : : "memory") // lwsync would do but better safe than sorry
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#elif defined(__riscv) && (__riscv_xlen == 64)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
@@ -112,7 +142,6 @@ const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1; // return address is at -1 from FP
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 24; // Best guess from examining readelf
|
||||
const int PLT_ENTRY_SIZE = 24; // ...same...
|
||||
const int PERF_REG_PC = 0; // PERF_REG_RISCV_PC
|
||||
@@ -121,6 +150,10 @@ const int PERF_REG_PC = 0; // PERF_REG_RISCV_PC
|
||||
#define rmb() asm volatile ("fence" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#elif defined(__loongarch_lp64)
|
||||
|
||||
typedef unsigned int instruction_t;
|
||||
@@ -129,7 +162,6 @@ const int BREAKPOINT_OFFSET = 0;
|
||||
|
||||
const int SYSCALL_SIZE = sizeof(instruction_t);
|
||||
const int FRAME_PC_SLOT = 1;
|
||||
const int PROBE_SP_LIMIT = 0;
|
||||
const int PLT_HEADER_SIZE = 32;
|
||||
const int PLT_ENTRY_SIZE = 16;
|
||||
const int PERF_REG_PC = 0; // PERF_REG_LOONGARCH_PC
|
||||
@@ -138,6 +170,10 @@ const int PERF_REG_PC = 0; // PERF_REG_LOONGARCH_PC
|
||||
#define rmb() asm volatile("dbar 0x0" : : : "memory")
|
||||
#define flushCache(addr) __builtin___clear_cache((char*)(addr), (char*)(addr) + sizeof(instruction_t))
|
||||
|
||||
#define callerPC() __builtin_return_address(0)
|
||||
#define callerFP() __builtin_frame_address(1)
|
||||
#define callerSP() __builtin_frame_address(0)
|
||||
|
||||
#else
|
||||
|
||||
#error "Compiling on unsupported arch"
|
||||
@@ -145,18 +181,20 @@ const int PERF_REG_PC = 0; // PERF_REG_LOONGARCH_PC
|
||||
#endif
|
||||
|
||||
|
||||
// Return address signing support.
|
||||
// Apple M1 has 47 bit virtual addresses.
|
||||
// On Apple M1 and later processors, memory is either writable or executable (W^X)
|
||||
#if defined(__aarch64__) && defined(__APPLE__)
|
||||
# define ADDRESS_BITS 47
|
||||
# define WX_MEMORY true
|
||||
# define WX_MEMORY true
|
||||
#else
|
||||
# define WX_MEMORY false
|
||||
# define WX_MEMORY false
|
||||
#endif
|
||||
|
||||
#ifdef ADDRESS_BITS
|
||||
// Pointer authentication (PAC) support.
|
||||
// Only 48-bit virtual addresses are currently supported.
|
||||
#ifdef __aarch64__
|
||||
const unsigned long PAC_MASK = WX_MEMORY ? 0x7fffffffffffUL : 0xffffffffffffUL;
|
||||
|
||||
static inline const void* stripPointer(const void* p) {
|
||||
return (const void*) ((unsigned long)p & ((1UL << ADDRESS_BITS) - 1));
|
||||
return (const void*) ((unsigned long)p & PAC_MASK);
|
||||
}
|
||||
#else
|
||||
# define stripPointer(p) (p)
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "arguments.h"
|
||||
#include "os.h"
|
||||
|
||||
|
||||
// Arguments of the last start/resume command; reused for shutdown and restart
|
||||
@@ -22,12 +23,6 @@ const Error Error::OK(NULL);
|
||||
// Extra buffer space for expanding file pattern
|
||||
const size_t EXTRA_BUF_SIZE = 512;
|
||||
|
||||
static const Multiplier NANOS[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {0, 0}};
|
||||
static const Multiplier BYTES[] = {{'b', 1}, {'k', 1024}, {'m', 1048576}, {'g', 1073741824}, {0, 0}};
|
||||
static const Multiplier SECONDS[] = {{'s', 1}, {'m', 60}, {'h', 3600}, {'d', 86400}, {0, 0}};
|
||||
static const Multiplier UNIVERSAL[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {'b', 1}, {'k', 1024}, {'g', 1073741824}, {0, 0}};
|
||||
|
||||
|
||||
// Statically compute hash code of a string containing up to 12 [a-z] letters
|
||||
#define HASH(s) ((s[0] & 31LL) | (s[1] & 31LL) << 5 | (s[2] & 31LL) << 10 | (s[3] & 31LL) << 15 | \
|
||||
(s[4] & 31LL) << 20 | (s[5] & 31LL) << 25 | (s[6] & 31LL) << 30 | (s[7] & 31LL) << 35 | \
|
||||
@@ -42,72 +37,7 @@ static const Multiplier UNIVERSAL[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'
|
||||
|
||||
|
||||
// Parses agent arguments.
|
||||
// The format of the string is:
|
||||
// arg[,arg...]
|
||||
// where arg is one of the following options:
|
||||
// start - start profiling
|
||||
// resume - start or resume profiling without resetting collected data
|
||||
// stop - stop profiling
|
||||
// dump - dump collected data without stopping profiling session
|
||||
// check - check if the specified profiling event is available
|
||||
// status - print profiling status (inactive / running for X seconds)
|
||||
// meminfo - print profiler memory stats
|
||||
// list - show the list of available profiling events
|
||||
// version - display the agent version
|
||||
// event=EVENT - which event to trace (cpu, wall, cache-misses, etc.)
|
||||
// alloc[=BYTES] - profile allocations with BYTES interval
|
||||
// live - build allocation profile from live objects only
|
||||
// lock[=DURATION] - profile contended locks longer than DURATION ns
|
||||
// wall[=NS] - run wall clock profiling together with CPU profiling
|
||||
// collapsed - dump collapsed stacks (the format used by FlameGraph script)
|
||||
// flamegraph - produce Flame Graph in HTML format
|
||||
// tree - produce call tree in HTML format
|
||||
// jfr - dump events in Java Flight Recorder format
|
||||
// jfropts=OPTIONS - JFR recording options: numeric bitmask or 'mem'
|
||||
// jfrsync[=CONFIG] - start Java Flight Recording with the given config along with the profiler
|
||||
// traces[=N] - dump top N call traces
|
||||
// flat[=N] - dump top N methods (aka flat profile)
|
||||
// samples - count the number of samples (default)
|
||||
// total - count the total value (time, bytes, etc.) instead of samples
|
||||
// chunksize=N - approximate size of JFR chunk in bytes (default: 100 MB)
|
||||
// chunktime=N - duration of JFR chunk in seconds (default: 1 hour)
|
||||
// timeout=TIME - automatically stop profiler at TIME (absolute or relative)
|
||||
// loop=TIME - run profiler in a loop (continuous profiling)
|
||||
// interval=N - sampling interval in ns (default: 10'000'000, i.e. 10 ms)
|
||||
// jstackdepth=N - maximum Java stack depth (default: 2048)
|
||||
// signal=N - use alternative signal for cpu or wall clock profiling
|
||||
// features=LIST - advanced stack trace features (vtable, comptask)"
|
||||
// safemode=BITS - disable stack recovery techniques (default: 0, i.e. everything enabled)
|
||||
// file=FILENAME - output file name for dumping
|
||||
// log=FILENAME - log warnings and errors to the given dedicated stream
|
||||
// loglevel=LEVEL - logging level: TRACE, DEBUG, INFO, WARN, ERROR, or NONE
|
||||
// server=ADDRESS - start insecure HTTP server at ADDRESS/PORT
|
||||
// filter=FILTER - thread filter
|
||||
// threads - profile different threads separately
|
||||
// sched - group threads by scheduling policy
|
||||
// cstack=MODE - how to collect C stack frames in addition to Java stack
|
||||
// MODE is 'fp', 'dwarf', 'lbr', 'vm' or 'no'
|
||||
// clock=SOURCE - clock source for JFR timestamps: 'tsc' or 'monotonic'
|
||||
// allkernel - include only kernel-mode events
|
||||
// alluser - include only user-mode events
|
||||
// fdtransfer - use fdtransfer to pass fds to the profiler
|
||||
// simple - simple class names instead of FQN
|
||||
// dot - dotted class names
|
||||
// norm - normalize names of hidden classes / lambdas
|
||||
// sig - print method signatures
|
||||
// ann - annotate Java methods
|
||||
// lib - prepend library names
|
||||
// mcache - max age of jmethodID cache (default: 0 = disabled)
|
||||
// include=PATTERN - include stack traces containing PATTERN
|
||||
// exclude=PATTERN - exclude stack traces containing PATTERN
|
||||
// begin=FUNCTION - begin profiling when FUNCTION is executed
|
||||
// end=FUNCTION - end profiling when FUNCTION is executed
|
||||
// title=TITLE - FlameGraph title
|
||||
// minwidth=PCT - FlameGraph minimum frame width in percent
|
||||
// reverse - generate stack-reversed FlameGraph / Call tree
|
||||
//
|
||||
// It is possible to specify multiple dump options at the same time
|
||||
|
||||
// The format of the string is: arg[,arg...]
|
||||
Error Arguments::parse(const char* args) {
|
||||
if (args == NULL) {
|
||||
return Error::OK;
|
||||
@@ -121,7 +51,7 @@ Error Arguments::parse(const char* args) {
|
||||
}
|
||||
char* args_copy = strcpy(_buf + EXTRA_BUF_SIZE, args);
|
||||
|
||||
const char* msg = NULL;
|
||||
const char* msg = NULL;
|
||||
|
||||
for (char* arg = strtok(args_copy, ","); arg != NULL; arg = strtok(NULL, ",")) {
|
||||
char* value = strchr(arg, '=');
|
||||
@@ -141,14 +71,11 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("dump")
|
||||
_action = ACTION_DUMP;
|
||||
|
||||
CASE("check")
|
||||
_action = ACTION_CHECK;
|
||||
|
||||
CASE("status")
|
||||
_action = ACTION_STATUS;
|
||||
|
||||
CASE("meminfo")
|
||||
_action = ACTION_MEMINFO;
|
||||
CASE("metrics")
|
||||
_action = ACTION_METRICS;
|
||||
|
||||
CASE("list")
|
||||
_action = ACTION_LIST;
|
||||
@@ -192,6 +119,9 @@ Error Arguments::parse(const char* args) {
|
||||
_output = OUTPUT_TEXT;
|
||||
_dump_flat = value == NULL ? INT_MAX : atoi(value);
|
||||
|
||||
CASE("otlp")
|
||||
_output = OUTPUT_OTLP;
|
||||
|
||||
CASE("samples")
|
||||
_counter = COUNTER_SAMPLES;
|
||||
|
||||
@@ -214,9 +144,13 @@ Error Arguments::parse(const char* args) {
|
||||
msg = "event must not be empty";
|
||||
} else if (strcmp(value, EVENT_ALLOC) == 0) {
|
||||
if (_alloc < 0) _alloc = 0;
|
||||
} else if (strcmp(value, EVENT_NATIVEMEM) == 0) {
|
||||
if (_nativemem < 0) _nativemem = 0;
|
||||
} else if (strcmp(value, EVENT_LOCK) == 0) {
|
||||
if (_lock < 0) _lock = 0;
|
||||
} else if (_event != NULL) {
|
||||
if (_lock < 0) _lock = DEFAULT_LOCK_INTERVAL;
|
||||
} else if (strcmp(value, EVENT_NATIVELOCK) == 0) {
|
||||
if (_nativelock < 0) _nativelock = DEFAULT_LOCK_INTERVAL;
|
||||
} else if (_event != NULL && !_all) {
|
||||
msg = "Duplicate event argument";
|
||||
} else {
|
||||
_event = value;
|
||||
@@ -228,20 +162,40 @@ Error Arguments::parse(const char* args) {
|
||||
}
|
||||
|
||||
CASE("loop")
|
||||
_loop = true;
|
||||
if (value == NULL || (_timeout = parseTimeout(value)) == -1) {
|
||||
if (value == NULL || (_loop = parseTimeout(value)) == -1) {
|
||||
msg = "Invalid loop duration";
|
||||
}
|
||||
|
||||
CASE("memlimit")
|
||||
_mem_limit = value == NULL ? 0 : parseUnits(value, BYTES);
|
||||
|
||||
CASE("alloc")
|
||||
_alloc = value == NULL ? 0 : parseUnits(value, BYTES);
|
||||
|
||||
CASE("tlab")
|
||||
_tlab = true;
|
||||
|
||||
CASE("nativemem")
|
||||
_nativemem = value == NULL ? 0 : parseUnits(value, BYTES);
|
||||
|
||||
CASE("nofree")
|
||||
_nofree = true;
|
||||
|
||||
CASE("trace")
|
||||
_trace.push_back(value);
|
||||
|
||||
CASE("lock")
|
||||
_lock = value == NULL ? 0 : parseUnits(value, NANOS);
|
||||
_lock = value == NULL ? DEFAULT_LOCK_INTERVAL : parseUnits(value, NANOS);
|
||||
|
||||
CASE("nativelock")
|
||||
_nativelock = value == NULL ? DEFAULT_LOCK_INTERVAL : parseUnits(value, NANOS);
|
||||
|
||||
CASE("wall")
|
||||
_wall = value == NULL ? 0 : parseUnits(value, NANOS);
|
||||
|
||||
CASE("proc")
|
||||
_proc = value == NULL ? DEFAULT_PROC_INTERVAL : parseUnits(value, SECONDS);
|
||||
|
||||
CASE("cpu")
|
||||
if (_event != NULL) {
|
||||
msg = "Duplicate event argument";
|
||||
@@ -249,6 +203,33 @@ Error Arguments::parse(const char* args) {
|
||||
_event = EVENT_CPU;
|
||||
}
|
||||
|
||||
CASE("all")
|
||||
_all = true;
|
||||
_live = true;
|
||||
if (_wall < 0) {
|
||||
_wall = 0;
|
||||
}
|
||||
if (_alloc < 0) {
|
||||
_alloc = 0;
|
||||
}
|
||||
if (_lock < 0) {
|
||||
_lock = DEFAULT_LOCK_INTERVAL;
|
||||
}
|
||||
if (_nativelock < 0) {
|
||||
_nativelock = DEFAULT_LOCK_INTERVAL;
|
||||
}
|
||||
if (_nativemem < 0) {
|
||||
_nativemem = DEFAULT_ALLOC_INTERVAL;
|
||||
}
|
||||
|
||||
if (_proc < 0 && OS::isLinux()) {
|
||||
_proc = DEFAULT_PROC_INTERVAL;
|
||||
}
|
||||
|
||||
if (_event == NULL && OS::isLinux()) {
|
||||
_event = EVENT_CPU;
|
||||
}
|
||||
|
||||
CASE("interval")
|
||||
if (value == NULL || (_interval = parseUnits(value, UNIVERSAL)) <= 0) {
|
||||
msg = "Invalid interval";
|
||||
@@ -257,6 +238,9 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("jstackdepth")
|
||||
if (value == NULL || (_jstackdepth = atoi(value)) <= 0) {
|
||||
msg = "jstackdepth must be > 0";
|
||||
} else {
|
||||
char* slash = strchr(value, '/');
|
||||
_truncated_stack_depth = slash != NULL ? atoi(slash + 1) : _jstackdepth;
|
||||
}
|
||||
|
||||
CASE("signal")
|
||||
@@ -269,22 +253,15 @@ Error Arguments::parse(const char* args) {
|
||||
|
||||
CASE("features")
|
||||
if (value != NULL) {
|
||||
if (strstr(value, "probesp")) _features.probe_sp = 1;
|
||||
if (strstr(value, "stats")) _features.stats = 1;
|
||||
if (strstr(value, "jnienv")) _features.jnienv = 1;
|
||||
if (strstr(value, "agct")) _features.agct = 1;
|
||||
if (strstr(value, "mixed")) _features.mixed = 1;
|
||||
if (strstr(value, "vtable")) _features.vtable_target = 1;
|
||||
if (strstr(value, "comptask")) _features.comp_task = 1;
|
||||
if (strstr(value, "pcaddr")) _features.pc_addr = 1;
|
||||
}
|
||||
|
||||
CASE("safemode") {
|
||||
// Left for compatibility purpose; will be eventually migrated to 'features'
|
||||
int bits = value == NULL ? INT_MAX : (int)strtol(value, NULL, 0);
|
||||
_features.unknown_java = (bits & 1) ? 0 : 1;
|
||||
_features.unwind_stub = (bits & 2) ? 0 : 1;
|
||||
_features.unwind_comp = (bits & 4) ? 0 : 1;
|
||||
_features.unwind_native = (bits & 8) ? 0 : 1;
|
||||
_features.java_anchor = (bits & 16) ? 0 : 1;
|
||||
_features.gc_traces = (bits & 32) ? 0 : 1;
|
||||
}
|
||||
|
||||
CASE("file")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "file must not be empty";
|
||||
@@ -300,6 +277,9 @@ Error Arguments::parse(const char* args) {
|
||||
}
|
||||
_loglevel = value;
|
||||
|
||||
CASE("quiet")
|
||||
_quiet = true;
|
||||
|
||||
CASE("server")
|
||||
if (value == NULL || value[0] == 0) {
|
||||
msg = "server address must not be empty";
|
||||
@@ -318,12 +298,10 @@ Error Arguments::parse(const char* args) {
|
||||
_filter = value == NULL ? "" : value;
|
||||
|
||||
CASE("include")
|
||||
// Workaround -Wstringop-overflow warning
|
||||
if (value == arg + 8) appendToEmbeddedList(_include, arg + 8);
|
||||
_include.push_back(value);
|
||||
|
||||
CASE("exclude")
|
||||
// Workaround -Wstringop-overflow warning
|
||||
if (value == arg + 8) appendToEmbeddedList(_exclude, arg + 8);
|
||||
_exclude.push_back(value);
|
||||
|
||||
CASE("threads")
|
||||
_threads = true;
|
||||
@@ -331,23 +309,32 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("sched")
|
||||
_sched = true;
|
||||
|
||||
CASE("record-cpu")
|
||||
_record_cpu = true;
|
||||
|
||||
CASE("live")
|
||||
_live = true;
|
||||
|
||||
CASE("allkernel")
|
||||
_ring = RING_KERNEL;
|
||||
CASE("nobatch")
|
||||
_nobatch = true;
|
||||
|
||||
CASE("alluser")
|
||||
_ring = RING_USER;
|
||||
_alluser = true;
|
||||
|
||||
CASE("cstack")
|
||||
if (value != NULL) {
|
||||
switch (value[0]) {
|
||||
case 'n': _cstack = CSTACK_NO; break;
|
||||
case 'd': _cstack = CSTACK_DWARF; break;
|
||||
case 'l': _cstack = CSTACK_LBR; break;
|
||||
case 'v': _cstack = CSTACK_VM; break;
|
||||
default: _cstack = CSTACK_FP;
|
||||
if (strcmp(value, "fp") == 0) {
|
||||
_cstack = CSTACK_FP;
|
||||
} else if (strcmp(value, "dwarf") == 0) {
|
||||
_cstack = CSTACK_DWARF;
|
||||
} else if (strcmp(value, "vm") == 0) {
|
||||
_cstack = CSTACK_VM;
|
||||
} else if (strcmp(value, "vmx") == 0) {
|
||||
// cstack=vmx is a shorthand for cstack=vm,features=mixed
|
||||
_cstack = CSTACK_VM;
|
||||
_features.mixed = 1;
|
||||
} else {
|
||||
_cstack = CSTACK_NO;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -360,6 +347,11 @@ Error Arguments::parse(const char* args) {
|
||||
}
|
||||
}
|
||||
|
||||
CASE("target-cpu")
|
||||
if (value == NULL || (_target_cpu = atoi(value)) < 0) {
|
||||
_target_cpu = -1;
|
||||
}
|
||||
|
||||
// Output style modifiers
|
||||
CASE("simple")
|
||||
_style |= STYLE_SIMPLE;
|
||||
@@ -388,6 +380,16 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("end")
|
||||
_end = value;
|
||||
|
||||
CASE("nostop")
|
||||
_nostop = true;
|
||||
|
||||
CASE("ttsp")
|
||||
if (_begin != NULL || _end != NULL) {
|
||||
msg = "begin and end must both be empty when ttsp is set";
|
||||
}
|
||||
_begin = "SafepointSynchronize::begin";
|
||||
_end = "RuntimeService::record_safepoint_synchronized";
|
||||
|
||||
// FlameGraph options
|
||||
CASE("title")
|
||||
_title = value;
|
||||
@@ -398,6 +400,9 @@ Error Arguments::parse(const char* args) {
|
||||
CASE("reverse")
|
||||
_reverse = true;
|
||||
|
||||
CASE("inverted")
|
||||
_inverted = true;
|
||||
|
||||
DEFAULT()
|
||||
if (_unknown_arg == NULL) _unknown_arg = arg;
|
||||
}
|
||||
@@ -408,7 +413,7 @@ Error Arguments::parse(const char* args) {
|
||||
return Error(msg);
|
||||
}
|
||||
|
||||
if (_event == NULL && _alloc < 0 && _lock < 0 && _wall < 0) {
|
||||
if (_event == NULL && _alloc < 0 && _lock < 0 && _wall < 0 && _nativemem < 0 && _nativelock < 0 && _trace.empty()) {
|
||||
_event = EVENT_CPU;
|
||||
}
|
||||
|
||||
@@ -440,12 +445,6 @@ bool Arguments::hasTemporaryLog() const {
|
||||
return _log != NULL && strncmp(_log, "/tmp/asprof-log.", 16) == 0;
|
||||
}
|
||||
|
||||
// The linked list of string offsets is embedded right into _buf array
|
||||
void Arguments::appendToEmbeddedList(int& list, char* value) {
|
||||
((int*)value)[-1] = list;
|
||||
list = (int)(value - _buf);
|
||||
}
|
||||
|
||||
// Should match statically computed HASH(arg)
|
||||
long long Arguments::hash(const char* arg) {
|
||||
long long h = 0;
|
||||
|
||||
148
src/arguments.h
@@ -7,18 +7,23 @@
|
||||
#define _ARGUMENTS_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <vector>
|
||||
|
||||
|
||||
const long DEFAULT_INTERVAL = 10000000; // 10 ms
|
||||
const long DEFAULT_ALLOC_INTERVAL = 524287; // 512 KiB
|
||||
const long DEFAULT_LOCK_INTERVAL = 10000; // 10 us
|
||||
const long DEFAULT_PROC_INTERVAL = 30; // 30 seconds
|
||||
const int DEFAULT_JSTACKDEPTH = 2048;
|
||||
|
||||
const char* const EVENT_CPU = "cpu";
|
||||
const char* const EVENT_ALLOC = "alloc";
|
||||
const char* const EVENT_LOCK = "lock";
|
||||
const char* const EVENT_WALL = "wall";
|
||||
const char* const EVENT_CTIMER = "ctimer";
|
||||
const char* const EVENT_ITIMER = "itimer";
|
||||
const char* const EVENT_CPU = "cpu";
|
||||
const char* const EVENT_ALLOC = "alloc";
|
||||
const char* const EVENT_NATIVEMEM = "nativemem";
|
||||
const char* const EVENT_LOCK = "lock";
|
||||
const char* const EVENT_NATIVELOCK = "nativelock";
|
||||
const char* const EVENT_WALL = "wall";
|
||||
const char* const EVENT_CTIMER = "ctimer";
|
||||
const char* const EVENT_ITIMER = "itimer";
|
||||
|
||||
#define SHORT_ENUM __attribute__((__packed__))
|
||||
|
||||
@@ -28,9 +33,8 @@ enum SHORT_ENUM Action {
|
||||
ACTION_RESUME,
|
||||
ACTION_STOP,
|
||||
ACTION_DUMP,
|
||||
ACTION_CHECK,
|
||||
ACTION_STATUS,
|
||||
ACTION_MEMINFO,
|
||||
ACTION_METRICS,
|
||||
ACTION_LIST,
|
||||
ACTION_VERSION
|
||||
};
|
||||
@@ -40,12 +44,6 @@ enum SHORT_ENUM Counter {
|
||||
COUNTER_TOTAL
|
||||
};
|
||||
|
||||
enum SHORT_ENUM Ring {
|
||||
RING_ANY,
|
||||
RING_KERNEL,
|
||||
RING_USER
|
||||
};
|
||||
|
||||
enum Style {
|
||||
STYLE_SIMPLE = 0x1,
|
||||
STYLE_DOTTED = 0x2,
|
||||
@@ -58,12 +56,11 @@ enum Style {
|
||||
|
||||
// Whenever enum changes, update SETTING_CSTACK in FlightRecorder
|
||||
enum SHORT_ENUM CStack {
|
||||
CSTACK_DEFAULT,
|
||||
CSTACK_NO,
|
||||
CSTACK_FP,
|
||||
CSTACK_DWARF,
|
||||
CSTACK_LBR,
|
||||
CSTACK_VM
|
||||
CSTACK_DEFAULT, // use perf_event_open stack if available or Frame Pointer links otherwise
|
||||
CSTACK_NO, // do not collect native frames
|
||||
CSTACK_FP, // walk stack using Frame Pointer links
|
||||
CSTACK_DWARF, // use DWARF unwinding info from .eh_frame section
|
||||
CSTACK_VM // unwind using HotSpot VMStructs
|
||||
};
|
||||
|
||||
enum SHORT_ENUM Clock {
|
||||
@@ -79,7 +76,8 @@ enum SHORT_ENUM Output {
|
||||
OUTPUT_COLLAPSED,
|
||||
OUTPUT_FLAMEGRAPH,
|
||||
OUTPUT_TREE,
|
||||
OUTPUT_JFR
|
||||
OUTPUT_JFR,
|
||||
OUTPUT_OTLP
|
||||
};
|
||||
|
||||
enum JfrOption {
|
||||
@@ -94,24 +92,27 @@ enum JfrOption {
|
||||
JFR_SYNC_OPTS = NO_SYSTEM_INFO | NO_SYSTEM_PROPS | NO_NATIVE_LIBS | NO_CPU_LOAD | NO_HEAP_SUMMARY
|
||||
};
|
||||
|
||||
// Keep this in sync with JfrSync.java
|
||||
enum EventMask {
|
||||
EM_CPU = 1,
|
||||
EM_ALLOC = 2,
|
||||
EM_LOCK = 4,
|
||||
EM_WALL = 8,
|
||||
EM_NATIVEMEM = 16,
|
||||
EM_NATIVELOCK = 32,
|
||||
EM_METHOD_TRACE = 64
|
||||
};
|
||||
constexpr int EVENT_MASK_SIZE = 7;
|
||||
|
||||
struct StackWalkFeatures {
|
||||
// Stack recovery techniques used to workaround AsyncGetCallTrace flaws
|
||||
unsigned short unknown_java : 1;
|
||||
unsigned short unwind_stub : 1;
|
||||
unsigned short unwind_comp : 1;
|
||||
unsigned short unwind_native : 1;
|
||||
unsigned short java_anchor : 1;
|
||||
unsigned short gc_traces : 1;
|
||||
|
||||
// Additional HotSpot-specific features
|
||||
unsigned short probe_sp : 1;
|
||||
unsigned short vtable_target : 1;
|
||||
unsigned short comp_task : 1;
|
||||
unsigned short _reserved : 7;
|
||||
|
||||
StackWalkFeatures() : unknown_java(1), unwind_stub(1), unwind_comp(1), unwind_native(1), java_anchor(1), gc_traces(1),
|
||||
probe_sp(0), vtable_target(0), comp_task(0), _reserved(0) {
|
||||
}
|
||||
unsigned short stats : 1; // collect stack walking duration statistics
|
||||
unsigned short jnienv : 1; // verify JNIEnv* obtained using VMStructs
|
||||
unsigned short agct : 1; // force usage of AsyncGetCallTrace instead of VMStructs
|
||||
unsigned short mixed : 1; // mixed stack traces with Java and native frames interleaved
|
||||
unsigned short vtable_target : 1; // show receiver classes of vtable/itable stubs
|
||||
unsigned short comp_task : 1; // display current compilation task for JIT threads
|
||||
unsigned short pc_addr : 1; // record exact PC address for each sample
|
||||
unsigned short _padding : 9; // pad structure to 16 bits
|
||||
};
|
||||
|
||||
|
||||
@@ -120,6 +121,10 @@ struct Multiplier {
|
||||
long multiplier;
|
||||
};
|
||||
|
||||
constexpr Multiplier NANOS[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {0, 0}};
|
||||
constexpr Multiplier BYTES[] = {{'b', 1}, {'k', 1024}, {'m', 1048576}, {'g', 1073741824}, {0, 0}};
|
||||
constexpr Multiplier SECONDS[] = {{'s', 1}, {'m', 60}, {'h', 3600}, {'d', 86400}, {0, 0}};
|
||||
constexpr Multiplier UNIVERSAL[] = {{'n', 1}, {'u', 1000}, {'m', 1000000}, {'s', 1000000000}, {'b', 1}, {'k', 1024}, {'g', 1073741824}, {0, 0}};
|
||||
|
||||
class Error {
|
||||
private:
|
||||
@@ -146,25 +151,30 @@ class Arguments {
|
||||
char* _buf;
|
||||
bool _shared;
|
||||
|
||||
void appendToEmbeddedList(int& list, char* value);
|
||||
const char* expandFilePattern(const char* pattern);
|
||||
|
||||
static long long hash(const char* arg);
|
||||
static Output detectOutputFormat(const char* file);
|
||||
static long parseUnits(const char* str, const Multiplier* multipliers);
|
||||
static int parseTimeout(const char* str);
|
||||
|
||||
public:
|
||||
Action _action;
|
||||
Counter _counter;
|
||||
Ring _ring;
|
||||
const char* _event;
|
||||
std::vector<const char*> _trace;
|
||||
int _timeout;
|
||||
int _loop;
|
||||
size_t _mem_limit;
|
||||
long _interval;
|
||||
long _alloc;
|
||||
long _nativemem;
|
||||
long _lock;
|
||||
long _nativelock;
|
||||
long _wall;
|
||||
long _proc;
|
||||
bool _all;
|
||||
int _jstackdepth;
|
||||
int _truncated_stack_depth;
|
||||
int _signal;
|
||||
const char* _file;
|
||||
const char* _log;
|
||||
@@ -172,16 +182,23 @@ class Arguments {
|
||||
const char* _unknown_arg;
|
||||
const char* _server;
|
||||
const char* _filter;
|
||||
int _include;
|
||||
int _exclude;
|
||||
std::vector<const char*> _include;
|
||||
std::vector<const char*> _exclude;
|
||||
unsigned char _mcache;
|
||||
bool _loop;
|
||||
bool _preloaded;
|
||||
bool _quiet;
|
||||
bool _threads;
|
||||
bool _sched;
|
||||
bool _record_cpu;
|
||||
bool _tlab;
|
||||
bool _live;
|
||||
bool _nofree;
|
||||
bool _nobatch;
|
||||
bool _nostop;
|
||||
bool _alluser;
|
||||
bool _fdtransfer;
|
||||
const char* _fdtransfer_path;
|
||||
int _target_cpu;
|
||||
int _style;
|
||||
StackWalkFeatures _features;
|
||||
CStack _cstack;
|
||||
@@ -200,20 +217,28 @@ class Arguments {
|
||||
const char* _title;
|
||||
double _minwidth;
|
||||
bool _reverse;
|
||||
bool _inverted;
|
||||
|
||||
Arguments() :
|
||||
_buf(NULL),
|
||||
_shared(false),
|
||||
_action(ACTION_NONE),
|
||||
_counter(COUNTER_SAMPLES),
|
||||
_ring(RING_ANY),
|
||||
_event(NULL),
|
||||
_trace(),
|
||||
_timeout(0),
|
||||
_loop(0),
|
||||
_mem_limit(0),
|
||||
_interval(0),
|
||||
_alloc(-1),
|
||||
_nativemem(-1),
|
||||
_lock(-1),
|
||||
_nativelock(-1),
|
||||
_wall(-1),
|
||||
_proc(-1),
|
||||
_all(false),
|
||||
_jstackdepth(DEFAULT_JSTACKDEPTH),
|
||||
_truncated_stack_depth(DEFAULT_JSTACKDEPTH),
|
||||
_signal(0),
|
||||
_file(NULL),
|
||||
_log(NULL),
|
||||
@@ -221,18 +246,25 @@ class Arguments {
|
||||
_unknown_arg(NULL),
|
||||
_server(NULL),
|
||||
_filter(NULL),
|
||||
_include(0),
|
||||
_exclude(0),
|
||||
_include(),
|
||||
_exclude(),
|
||||
_mcache(0),
|
||||
_loop(false),
|
||||
_preloaded(false),
|
||||
_quiet(false),
|
||||
_threads(false),
|
||||
_sched(false),
|
||||
_record_cpu(false),
|
||||
_tlab(false),
|
||||
_live(false),
|
||||
_nofree(false),
|
||||
_nobatch(false),
|
||||
_nostop(false),
|
||||
_alluser(false),
|
||||
_fdtransfer(false),
|
||||
_fdtransfer_path(NULL),
|
||||
_target_cpu(-1),
|
||||
_style(0),
|
||||
_features(),
|
||||
_features{},
|
||||
_cstack(CSTACK_DEFAULT),
|
||||
_clock(CLK_DEFAULT),
|
||||
_output(OUTPUT_NONE),
|
||||
@@ -247,7 +279,8 @@ class Arguments {
|
||||
_end(NULL),
|
||||
_title(NULL),
|
||||
_minwidth(0),
|
||||
_reverse(false) {
|
||||
_reverse(false),
|
||||
_inverted(false) {
|
||||
}
|
||||
|
||||
~Arguments();
|
||||
@@ -262,15 +295,24 @@ class Arguments {
|
||||
|
||||
bool hasOutputFile() const {
|
||||
return _file != NULL &&
|
||||
(_action == ACTION_STOP || _action == ACTION_DUMP ? _output != OUTPUT_JFR : _action >= ACTION_CHECK);
|
||||
(_action == ACTION_STOP || _action == ACTION_DUMP ? _output != OUTPUT_JFR : _action >= ACTION_STATUS);
|
||||
}
|
||||
|
||||
bool hasOption(JfrOption option) const {
|
||||
return (_jfr_options & option) != 0;
|
||||
}
|
||||
|
||||
friend class FrameName;
|
||||
friend class Recording;
|
||||
int eventMask() const {
|
||||
return (_event != NULL ? EM_CPU : 0) |
|
||||
(_alloc >= 0 ? EM_ALLOC : 0) |
|
||||
(_lock >= 0 ? EM_LOCK : 0) |
|
||||
(_wall >= 0 ? EM_WALL : 0) |
|
||||
(_nativemem >= 0 ? EM_NATIVEMEM : 0) |
|
||||
(_nativelock >= 0 ? EM_NATIVELOCK : 0) |
|
||||
(!_trace.empty() ? EM_METHOD_TRACE : 0);
|
||||
}
|
||||
|
||||
static long parseUnits(const char* str, const Multiplier* multipliers);
|
||||
};
|
||||
|
||||
extern Arguments _global_args;
|
||||
|
||||
@@ -6,7 +6,9 @@
|
||||
#include "asprof.h"
|
||||
#include "hooks.h"
|
||||
#include "profiler.h"
|
||||
|
||||
#include "tsc.h"
|
||||
#include "threadLocalData.h"
|
||||
#include "userEvents.h"
|
||||
|
||||
static asprof_error_t asprof_error(const char* msg) {
|
||||
return (asprof_error_t)msg;
|
||||
@@ -49,3 +51,27 @@ DLLEXPORT asprof_error_t asprof_execute(const char* command, asprof_writer_t out
|
||||
|
||||
return asprof_error(error.message());
|
||||
}
|
||||
|
||||
DLLEXPORT asprof_thread_local_data* asprof_get_thread_local_data(void) {
|
||||
return ThreadLocalData::getThreadLocalData();
|
||||
}
|
||||
|
||||
DLLEXPORT asprof_jfr_event_key asprof_register_jfr_event(const char* name) {
|
||||
return UserEvents::registerEvent(name);
|
||||
}
|
||||
|
||||
#define asprof_str(s) #s
|
||||
|
||||
DLLEXPORT asprof_error_t asprof_emit_jfr_event(asprof_jfr_event_key type, const uint8_t* data, size_t len) {
|
||||
if (len > ASPROF_MAX_JFR_EVENT_LENGTH) {
|
||||
return asprof_error("Unable to emit JFR event larger than " asprof_str(ASPROF_MAX_JFR_EVENT_LENGTH) " bytes");
|
||||
}
|
||||
|
||||
UserEvent event;
|
||||
event._start_time = TSC::ticks();
|
||||
event._type = type;
|
||||
event._data = data;
|
||||
event._len = len;
|
||||
Profiler::instance()->recordEventOnly(USER_EVENT, &event);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
63
src/asprof.h
@@ -7,6 +7,7 @@
|
||||
#define _ASPROF_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __clang__
|
||||
# define DLLEXPORT __attribute__((visibility("default")))
|
||||
@@ -14,6 +15,8 @@
|
||||
# define DLLEXPORT __attribute__((visibility("default"),externally_visible))
|
||||
#endif
|
||||
|
||||
#define WEAK __attribute__((weak))
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
@@ -35,6 +38,66 @@ typedef const char* (*asprof_error_str_t)(asprof_error_t err);
|
||||
DLLEXPORT asprof_error_t asprof_execute(const char* command, asprof_writer_t output_callback);
|
||||
typedef asprof_error_t (*asprof_execute_t)(const char* command, asprof_writer_t output_callback);
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
typedef struct {
|
||||
// A thread-local sample counter, which increments (not necessarily by 1) every time a
|
||||
// stack profiling sample is taken using a profiling signal.
|
||||
//
|
||||
// The counter might be initialized lazily, only starting counting from 0 the first time
|
||||
// `asprof_get_thread_local_data` is called on a given thread. Further calls to
|
||||
// `asprof_get_thread_local_data` on a given thread will of course not reset the counter.
|
||||
volatile uint64_t sample_counter;
|
||||
} asprof_thread_local_data;
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Gets a pointer to asprof's thread-local data structure, see `asprof_thread_local_data`'s
|
||||
// documentation for the details of each field. This function might lazily initialize that
|
||||
// structure.
|
||||
//
|
||||
// This function can return NULL either if the profiler is not yet initializer, or in
|
||||
// case of an allocation failure.
|
||||
//
|
||||
// This function is *not* async-signal-safe. However, it is safe to call concurrently
|
||||
// with async-profiler operations, including initialization.
|
||||
DLLEXPORT asprof_thread_local_data* asprof_get_thread_local_data(void);
|
||||
typedef asprof_thread_local_data* (*asprof_get_thread_local_data_t)(void);
|
||||
|
||||
|
||||
typedef int asprof_jfr_event_key;
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Return a asprof_jfr_event_key identifier for a user-defined JFR key.
|
||||
// That identifier can then be used in `asprof_emit_jfr_event`
|
||||
//
|
||||
// The name is required to be valid (since it's a C string, NUL-free) UTF-8.
|
||||
//
|
||||
// Returns -1 on failure.
|
||||
DLLEXPORT asprof_jfr_event_key asprof_register_jfr_event(const char* name);
|
||||
typedef asprof_jfr_event_key (*asprof_register_jfr_event_t)(const char* name);
|
||||
|
||||
|
||||
#define ASPROF_MAX_JFR_EVENT_LENGTH 2048
|
||||
|
||||
// This API is UNSTABLE and might change or be removed in the next version of async-profiler.
|
||||
//
|
||||
// Emits a custom, user-defined JFR event. The key should be created via `asprof_register_jfr_event`.
|
||||
// The data can be arbitrary binary data, with size <= ASPROF_MAX_JFR_EVENT_LENGTH.
|
||||
//
|
||||
// User-defined events are included in the JFR under a `profiler.UserEvent` event type. That type will contain
|
||||
// (at least) the following fields:
|
||||
// 1. `startTime` [Long] - the emitted event's time in ticks.
|
||||
// 2. `eventThread` [java.lang.Thread] - the thread that emitted the events.
|
||||
// 3. `type` [profiler.types.UserEventType] - the event's type,
|
||||
// where `profiler.types.UserEventType` is an indexed string from the JFR constant pool.
|
||||
// 4. `data` [String] - the event data. This is the Latin-1 encoded version of the inputted data.
|
||||
// The Latin-1 encoding is used as a way to stuff the arbitrary byte input into something
|
||||
// that JFR supports (JFR technically supports byte arrays, but `jfr print` doesn't).
|
||||
//
|
||||
// Returns an error code or NULL on success.
|
||||
DLLEXPORT asprof_error_t asprof_emit_jfr_event(asprof_jfr_event_key type, const uint8_t* data, size_t len);
|
||||
typedef asprof_error_t (*asprof_emit_jfr_event_t)(asprof_jfr_event_key type, const uint8_t* data, size_t len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
@@ -3,14 +3,17 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include "callTraceStorage.h"
|
||||
#include "os.h"
|
||||
|
||||
#define COMMA ,
|
||||
|
||||
static const u32 INITIAL_CAPACITY = 65536;
|
||||
static const u32 CALL_TRACE_CHUNK = 8 * 1024 * 1024;
|
||||
static const u32 OVERFLOW_TRACE_ID = 0x7fffffff;
|
||||
static const size_t MEM_LIMIT_EXTRA = 0x10000; // reserve up to 64 KB for LongHashTable headers
|
||||
|
||||
|
||||
class LongHashTable {
|
||||
@@ -78,11 +81,12 @@ class LongHashTable {
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
CallTrace CallTraceStorage::_overflow_trace = {1, {BCI_ERROR, (jmethodID)"storage_overflow"}};
|
||||
CallTrace CallTraceStorage::_overflow_trace = {1, {BCI_ERROR, LP64_ONLY(0 COMMA) (jmethodID)"storage_overflow"}};
|
||||
|
||||
CallTraceStorage::CallTraceStorage() : _allocator(CALL_TRACE_CHUNK) {
|
||||
_current_table = LongHashTable::allocate(NULL, INITIAL_CAPACITY);
|
||||
_used_memory = _current_table->usedMemory();
|
||||
_mem_limit = SIZE_MAX;
|
||||
_overflow = 0;
|
||||
}
|
||||
|
||||
@@ -92,21 +96,25 @@ CallTraceStorage::~CallTraceStorage() {
|
||||
}
|
||||
}
|
||||
|
||||
void CallTraceStorage::clear() {
|
||||
void CallTraceStorage::clear(size_t mem_limit) {
|
||||
while (_current_table->prev() != NULL) {
|
||||
_current_table = _current_table->destroy();
|
||||
}
|
||||
_current_table->clear();
|
||||
_used_memory = _current_table->usedMemory();
|
||||
_allocator.clear();
|
||||
_mem_limit = mem_limit ? mem_limit | MEM_LIMIT_EXTRA : SIZE_MAX;
|
||||
_overflow = 0;
|
||||
}
|
||||
|
||||
u32 CallTraceStorage::capacity() {
|
||||
// As capacity of each subsequent table doubles,
|
||||
// total capacity is a sum of geometric series: 64K + 128K + 256K...
|
||||
return _current_table->capacity() * 2 - INITIAL_CAPACITY;
|
||||
}
|
||||
|
||||
size_t CallTraceStorage::usedMemory() {
|
||||
size_t bytes = _allocator.usedMemory();
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
bytes += table->usedMemory();
|
||||
}
|
||||
return bytes;
|
||||
return _used_memory + _allocator.usedMemory();
|
||||
}
|
||||
|
||||
void CallTraceStorage::collectTraces(std::map<u32, CallTrace*>& map) {
|
||||
@@ -235,15 +243,23 @@ u32 CallTraceStorage::put(int num_frames, ASGCT_CallFrame* frames, u64 counter)
|
||||
|
||||
while (keys[slot] != hash) {
|
||||
if (keys[slot] == 0) {
|
||||
if (usedMemory() > _mem_limit) {
|
||||
// Stop adding new stack traces once memory limit is exceeded
|
||||
atomicInc(_overflow);
|
||||
return OVERFLOW_TRACE_ID;
|
||||
}
|
||||
|
||||
if (!__sync_bool_compare_and_swap(&keys[slot], 0, hash)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Increment the table size, and if the load factor exceeds 0.75, reserve a new table
|
||||
// Increment the table size, and if the load factor exceeds 0.75, reserve a new table.
|
||||
// This condition can be hit only once per table, so the below allocation is race-free.
|
||||
if (table->incSize() == capacity * 3 / 4) {
|
||||
LongHashTable* new_table = LongHashTable::allocate(table, capacity * 2);
|
||||
if (new_table != NULL) {
|
||||
__sync_bool_compare_and_swap(&_current_table, table, new_table);
|
||||
atomicInc(_used_memory, new_table->usedMemory());
|
||||
storeRelease(_current_table, new_table);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -274,8 +290,8 @@ u32 CallTraceStorage::put(int num_frames, ASGCT_CallFrame* frames, u64 counter)
|
||||
return capacity - (INITIAL_CAPACITY - 1) + slot;
|
||||
}
|
||||
|
||||
void CallTraceStorage::add(u32 call_trace_id, u64 counter) {
|
||||
if (call_trace_id == OVERFLOW_TRACE_ID) {
|
||||
void CallTraceStorage::add(u32 call_trace_id, u64 samples, u64 counter) {
|
||||
if (call_trace_id > capacity()) { // this also covers call_trace_id == OVERFLOW_TRACE_ID
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -283,9 +299,25 @@ void CallTraceStorage::add(u32 call_trace_id, u64 counter) {
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
if (call_trace_id >= table->capacity()) {
|
||||
CallTraceSample& s = table->values()[call_trace_id - table->capacity()];
|
||||
atomicInc(s.samples);
|
||||
atomicInc(s.samples, samples);
|
||||
atomicInc(s.counter, counter);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CallTraceStorage::resetCounters() {
|
||||
for (LongHashTable* table = _current_table; table != NULL; table = table->prev()) {
|
||||
u64* keys = table->keys();
|
||||
CallTraceSample* values = table->values();
|
||||
u32 capacity = table->capacity();
|
||||
|
||||
for (u32 slot = 0; slot < capacity; slot++) {
|
||||
if (keys[slot] != 0) {
|
||||
CallTraceSample& s = values[slot];
|
||||
storeRelease(s.samples, 0);
|
||||
storeRelease(s.counter, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,11 +26,11 @@ struct CallTraceSample {
|
||||
u64 counter;
|
||||
|
||||
CallTrace* acquireTrace() {
|
||||
return __atomic_load_n(&trace, __ATOMIC_ACQUIRE);
|
||||
return loadAcquire(trace);
|
||||
}
|
||||
|
||||
void setTrace(CallTrace* value) {
|
||||
return __atomic_store_n(&trace, value, __ATOMIC_RELEASE);
|
||||
storeRelease(trace, value);
|
||||
}
|
||||
|
||||
CallTraceSample& operator+=(const CallTraceSample& s) {
|
||||
@@ -39,10 +39,6 @@ struct CallTraceSample {
|
||||
counter += s.counter;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator<(const CallTraceSample& other) const {
|
||||
return counter > other.counter;
|
||||
}
|
||||
};
|
||||
|
||||
class CallTraceStorage {
|
||||
@@ -51,6 +47,8 @@ class CallTraceStorage {
|
||||
|
||||
LinearAllocator _allocator;
|
||||
LongHashTable* _current_table;
|
||||
size_t _used_memory;
|
||||
size_t _mem_limit;
|
||||
u64 _overflow;
|
||||
|
||||
u64 calcHash(int num_frames, ASGCT_CallFrame* frames);
|
||||
@@ -61,15 +59,18 @@ class CallTraceStorage {
|
||||
CallTraceStorage();
|
||||
~CallTraceStorage();
|
||||
|
||||
void clear();
|
||||
void clear(size_t mem_limit);
|
||||
u32 capacity();
|
||||
size_t usedMemory();
|
||||
u64 overflow() { return _overflow; }
|
||||
|
||||
void collectTraces(std::map<u32, CallTrace*>& map);
|
||||
void collectSamples(std::vector<CallTraceSample*>& samples);
|
||||
void collectSamples(std::map<u64, CallTraceSample>& map);
|
||||
|
||||
u32 put(int num_frames, ASGCT_CallFrame* frames, u64 counter);
|
||||
void add(u32 call_trace_id, u64 counter);
|
||||
void add(u32 call_trace_id, u64 samples, u64 counter);
|
||||
void resetCounters();
|
||||
};
|
||||
|
||||
#endif // _CALLTRACESTORAGE
|
||||
|
||||
32
src/chk.cpp
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#ifndef __clang__
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "asprof.h"
|
||||
|
||||
|
||||
// libgcc refers to __sprintf_chk, but there is no such symbol in musl libc.
|
||||
// Export a weak symbol in order to make profiler library work both with glibc and musl.
|
||||
|
||||
extern "C" WEAK DLLEXPORT
|
||||
int __sprintf_chk(char* s, int flag, size_t slen, const char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
int ret = vsnprintf(s, slen, format, args);
|
||||
va_end(args);
|
||||
|
||||
if (ret >= slen) {
|
||||
fprintf(stderr, "__sprintf_chk failed\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif // __clang__
|
||||
@@ -9,6 +9,7 @@
|
||||
#include <sys/mman.h>
|
||||
#include "codeCache.h"
|
||||
#include "dwarf.h"
|
||||
#include "log.h"
|
||||
#include "os.h"
|
||||
|
||||
|
||||
@@ -28,19 +29,22 @@ size_t NativeFunc::usedMemory(const char* name) {
|
||||
}
|
||||
|
||||
|
||||
CodeCache::CodeCache(const char* name, short lib_index, bool imports_patchable,
|
||||
const void* min_address, const void* max_address) {
|
||||
CodeCache::CodeCache(const char* name, short lib_index,
|
||||
const void* min_address, const void* max_address,
|
||||
const char* image_base) {
|
||||
_name = NativeFunc::create(name, -1);
|
||||
|
||||
_lib_index = lib_index;
|
||||
_min_address = min_address;
|
||||
_max_address = max_address;
|
||||
_text_base = NULL;
|
||||
_image_base = image_base;
|
||||
|
||||
_plt_offset = 0;
|
||||
_plt_size = 0;
|
||||
|
||||
memset(_imports, 0, sizeof(_imports));
|
||||
_imports_patchable = imports_patchable;
|
||||
_imports_patchable = false;
|
||||
_debug_symbols = false;
|
||||
|
||||
_dwarf_table = NULL;
|
||||
@@ -107,15 +111,6 @@ void CodeCache::sort() {
|
||||
if (_max_address == NO_MAX_ADDRESS) _max_address = _blobs[_count - 1]._end;
|
||||
}
|
||||
|
||||
void CodeCache::mark(NamePredicate predicate, char value) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
if (blob_name != NULL && predicate(blob_name)) {
|
||||
NativeFunc::mark(blob_name, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CodeBlob* CodeCache::findBlob(const char* name) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
@@ -168,31 +163,79 @@ const void* CodeCache::findSymbolByPrefix(const char* prefix) {
|
||||
}
|
||||
|
||||
const void* CodeCache::findSymbolByPrefix(const char* prefix, int prefix_len) {
|
||||
const void* result = NULL;
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
if (blob_name != NULL && strncmp(blob_name, prefix, prefix_len) == 0) {
|
||||
return _blobs[i]._start;
|
||||
result = _blobs[i]._start;
|
||||
// Symbols which contain a dot are only patched if no alternative is found,
|
||||
// see #1247
|
||||
if (strchr(blob_name + prefix_len, '.') == NULL) {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void CodeCache::saveImport(ImportId id, void** entry) {
|
||||
for (int ty = 0; ty < NUM_IMPORT_TYPES; ty++) {
|
||||
if (_imports[id][ty] == nullptr) {
|
||||
_imports[id][ty] = entry;
|
||||
return;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void CodeCache::addImport(void** entry, const char* name) {
|
||||
switch (name[0]) {
|
||||
case 'a':
|
||||
if (strcmp(name, "aligned_alloc") == 0) {
|
||||
saveImport(im_aligned_alloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'c':
|
||||
if (strcmp(name, "calloc") == 0) {
|
||||
saveImport(im_calloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'd':
|
||||
if (strcmp(name, "dlopen") == 0) {
|
||||
_imports[im_dlopen] = entry;
|
||||
saveImport(im_dlopen, entry);
|
||||
}
|
||||
break;
|
||||
case 'f':
|
||||
if (strcmp(name, "free") == 0) {
|
||||
saveImport(im_free, entry);
|
||||
}
|
||||
break;
|
||||
case 'm':
|
||||
if (strcmp(name, "malloc") == 0) {
|
||||
saveImport(im_malloc, entry);
|
||||
}
|
||||
break;
|
||||
case 'p':
|
||||
if (strcmp(name, "pthread_create") == 0) {
|
||||
_imports[im_pthread_create] = entry;
|
||||
saveImport(im_pthread_create, entry);
|
||||
} else if (strcmp(name, "pthread_exit") == 0) {
|
||||
_imports[im_pthread_exit] = entry;
|
||||
saveImport(im_pthread_exit, entry);
|
||||
} else if (strcmp(name, "pthread_mutex_lock") == 0) {
|
||||
saveImport(im_pthread_mutex_lock, entry);
|
||||
} else if (strcmp(name, "pthread_rwlock_rdlock") == 0) {
|
||||
saveImport(im_pthread_rwlock_rdlock, entry);
|
||||
} else if (strcmp(name, "pthread_rwlock_wrlock") == 0) {
|
||||
saveImport(im_pthread_rwlock_wrlock, entry);
|
||||
} else if (strcmp(name, "pthread_setspecific") == 0) {
|
||||
_imports[im_pthread_setspecific] = entry;
|
||||
saveImport(im_pthread_setspecific, entry);
|
||||
} else if (strcmp(name, "poll") == 0) {
|
||||
_imports[im_poll] = entry;
|
||||
saveImport(im_poll, entry);
|
||||
} else if (strcmp(name, "posix_memalign") == 0) {
|
||||
saveImport(im_posix_memalign, entry);
|
||||
}
|
||||
break;
|
||||
case 'r':
|
||||
if (strcmp(name, "realloc") == 0) {
|
||||
saveImport(im_realloc, entry);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -201,31 +244,46 @@ void CodeCache::addImport(void** entry, const char* name) {
|
||||
void** CodeCache::findImport(ImportId id) {
|
||||
if (!_imports_patchable) {
|
||||
makeImportsPatchable();
|
||||
_imports_patchable = true;
|
||||
}
|
||||
return _imports[id];
|
||||
return _imports[id][PRIMARY];
|
||||
}
|
||||
|
||||
void CodeCache::patchImport(ImportId id, void* hook_func) {
|
||||
void** entry = findImport(id);
|
||||
if (entry != NULL) {
|
||||
*entry = hook_func;
|
||||
if (!_imports_patchable && !makeImportsPatchable()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (int ty = 0; ty < NUM_IMPORT_TYPES; ty++) {
|
||||
void** entry = _imports[id][ty];
|
||||
if (entry != NULL) {
|
||||
*entry = hook_func;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CodeCache::makeImportsPatchable() {
|
||||
bool CodeCache::makeImportsPatchable() {
|
||||
void** min_import = (void**)-1;
|
||||
void** max_import = NULL;
|
||||
for (int i = 0; i < NUM_IMPORTS; i++) {
|
||||
if (_imports[i] != NULL && _imports[i] < min_import) min_import = _imports[i];
|
||||
if (_imports[i] != NULL && _imports[i] > max_import) max_import = _imports[i];
|
||||
for (int j = 0; j < NUM_IMPORT_TYPES; j++) {
|
||||
void** entry = _imports[i][j];
|
||||
if (entry == NULL) continue;
|
||||
if (entry < min_import) min_import = entry;
|
||||
if (entry > max_import) max_import = entry;
|
||||
}
|
||||
}
|
||||
|
||||
if (max_import != NULL) {
|
||||
uintptr_t patch_start = (uintptr_t)min_import & ~OS::page_mask;
|
||||
uintptr_t patch_end = (uintptr_t)max_import & ~OS::page_mask;
|
||||
mprotect((void*)patch_start, patch_end - patch_start + OS::page_size, PROT_READ | PROT_WRITE);
|
||||
if (OS::mprotect((void*)patch_start, patch_end - patch_start + OS::page_size, PROT_READ | PROT_WRITE) != 0) {
|
||||
Log::warn("Could not patch %s", name());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
_imports_patchable = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void CodeCache::setDwarfTable(FrameDesc* table, int length) {
|
||||
@@ -265,5 +323,5 @@ size_t CodeCache::usedMemory() {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
bytes += NativeFunc::usedMemory(_blobs[i]._name);
|
||||
}
|
||||
return bytes;
|
||||
return bytes + sizeof(CodeCache);
|
||||
}
|
||||
|
||||
@@ -7,13 +7,12 @@
|
||||
#define _CODECACHE_H
|
||||
|
||||
#include <jvmti.h>
|
||||
#include "arch.h"
|
||||
|
||||
|
||||
#define NO_MIN_ADDRESS ((const void*)-1)
|
||||
#define NO_MAX_ADDRESS ((const void*)0)
|
||||
|
||||
typedef bool (*NamePredicate)(const char* name);
|
||||
|
||||
const int INITIAL_CODE_CACHE_CAPACITY = 1000;
|
||||
const int MAX_NATIVE_LIBS = 2048;
|
||||
|
||||
@@ -22,14 +21,31 @@ enum ImportId {
|
||||
im_dlopen,
|
||||
im_pthread_create,
|
||||
im_pthread_exit,
|
||||
im_pthread_mutex_lock,
|
||||
im_pthread_rwlock_rdlock,
|
||||
im_pthread_rwlock_wrlock,
|
||||
im_pthread_setspecific,
|
||||
im_poll,
|
||||
im_malloc,
|
||||
im_calloc,
|
||||
im_realloc,
|
||||
im_free,
|
||||
im_posix_memalign,
|
||||
im_aligned_alloc,
|
||||
NUM_IMPORTS
|
||||
};
|
||||
|
||||
enum ImportType {
|
||||
PRIMARY,
|
||||
SECONDARY,
|
||||
NUM_IMPORT_TYPES
|
||||
};
|
||||
|
||||
enum Mark {
|
||||
MARK_INTERPRETER = 1,
|
||||
MARK_COMPILER_ENTRY = 2
|
||||
MARK_VM_RUNTIME = 1,
|
||||
MARK_INTERPRETER = 2,
|
||||
MARK_COMPILER_ENTRY = 3,
|
||||
MARK_ASYNC_PROFILER = 4, // async-profiler internals such as native hooks.
|
||||
};
|
||||
|
||||
|
||||
@@ -95,11 +111,12 @@ class CodeCache {
|
||||
const void* _min_address;
|
||||
const void* _max_address;
|
||||
const char* _text_base;
|
||||
const char* _image_base;
|
||||
|
||||
unsigned int _plt_offset;
|
||||
unsigned int _plt_size;
|
||||
|
||||
void** _imports[NUM_IMPORTS];
|
||||
void** _imports[NUM_IMPORTS][NUM_IMPORT_TYPES];
|
||||
bool _imports_patchable;
|
||||
bool _debug_symbols;
|
||||
|
||||
@@ -111,14 +128,15 @@ class CodeCache {
|
||||
CodeBlob* _blobs;
|
||||
|
||||
void expand();
|
||||
void makeImportsPatchable();
|
||||
bool makeImportsPatchable();
|
||||
void saveImport(ImportId id, void** entry);
|
||||
|
||||
public:
|
||||
CodeCache(const char* name,
|
||||
short lib_index = -1,
|
||||
bool imports_patchable = false,
|
||||
const void* min_address = NO_MIN_ADDRESS,
|
||||
const void* max_address = NO_MAX_ADDRESS);
|
||||
const void* max_address = NO_MAX_ADDRESS,
|
||||
const char* image_base = NULL);
|
||||
|
||||
~CodeCache();
|
||||
|
||||
@@ -134,6 +152,10 @@ class CodeCache {
|
||||
return _max_address;
|
||||
}
|
||||
|
||||
const char* imageBase() const {
|
||||
return _image_base;
|
||||
}
|
||||
|
||||
bool contains(const void* address) const {
|
||||
return address >= _min_address && address < _max_address;
|
||||
}
|
||||
@@ -158,11 +180,25 @@ class CodeCache {
|
||||
void add(const void* start, int length, const char* name, bool update_bounds = false);
|
||||
void updateBounds(const void* start, const void* end);
|
||||
void sort();
|
||||
void mark(NamePredicate predicate, char value);
|
||||
|
||||
template <typename NamePredicate>
|
||||
inline void mark(NamePredicate predicate, char value) {
|
||||
for (int i = 0; i < _count; i++) {
|
||||
const char* blob_name = _blobs[i]._name;
|
||||
if (blob_name != NULL && predicate(blob_name)) {
|
||||
NativeFunc::mark(blob_name, value);
|
||||
}
|
||||
}
|
||||
|
||||
if (value == MARK_VM_RUNTIME && _name != NULL) {
|
||||
// In case a library has no debug symbols
|
||||
NativeFunc::mark(_name, value);
|
||||
}
|
||||
}
|
||||
|
||||
void addImport(void** entry, const char* name);
|
||||
void** findImport(ImportId id);
|
||||
void patchImport(ImportId, void* hook_func);
|
||||
void patchImport(ImportId id, void* hook_func);
|
||||
|
||||
CodeBlob* findBlob(const char* name);
|
||||
CodeBlob* findBlobByAddress(const void* address);
|
||||
@@ -175,6 +211,8 @@ class CodeCache {
|
||||
FrameDesc* findFrameDesc(const void* pc);
|
||||
|
||||
size_t usedMemory();
|
||||
|
||||
friend class UnloadProtection;
|
||||
};
|
||||
|
||||
|
||||
@@ -182,6 +220,7 @@ class CodeCacheArray {
|
||||
private:
|
||||
CodeCache* _libs[MAX_NATIVE_LIBS];
|
||||
int _count;
|
||||
size_t _used_memory;
|
||||
|
||||
public:
|
||||
CodeCacheArray() : _count(0) {
|
||||
@@ -192,13 +231,18 @@ class CodeCacheArray {
|
||||
}
|
||||
|
||||
int count() {
|
||||
return __atomic_load_n(&_count, __ATOMIC_ACQUIRE);
|
||||
return loadAcquire(_count);
|
||||
}
|
||||
|
||||
size_t usedMemory() {
|
||||
return _used_memory;
|
||||
}
|
||||
|
||||
void add(CodeCache* lib) {
|
||||
int index = __atomic_load_n(&_count, __ATOMIC_ACQUIRE);
|
||||
int index = loadAcquire(_count);
|
||||
_libs[index] = lib;
|
||||
__atomic_store_n(&_count, index + 1, __ATOMIC_RELEASE);
|
||||
_used_memory += lib->usedMemory();
|
||||
storeRelease(_count, index + 1);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -11,21 +11,30 @@ import java.util.*;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
public class Arguments {
|
||||
public String title = "Flame Graph";
|
||||
public String title;
|
||||
public String highlight;
|
||||
public String output;
|
||||
public String state;
|
||||
public Pattern include;
|
||||
public Pattern exclude;
|
||||
public double minwidth;
|
||||
public double grain;
|
||||
public double tail = 0.1;
|
||||
public int skip;
|
||||
public boolean help;
|
||||
public boolean reverse;
|
||||
public boolean inverted;
|
||||
public boolean diff;
|
||||
public boolean cpu;
|
||||
public boolean cpuTime;
|
||||
public boolean wall;
|
||||
public boolean alloc;
|
||||
public boolean nativemem;
|
||||
public boolean nativelock;
|
||||
public boolean leak;
|
||||
public boolean live;
|
||||
public boolean lock;
|
||||
public boolean trace;
|
||||
public boolean threads;
|
||||
public boolean classify;
|
||||
public boolean total;
|
||||
@@ -36,6 +45,7 @@ public class Arguments {
|
||||
public boolean dot;
|
||||
public long from;
|
||||
public long to;
|
||||
public long latency = -1;
|
||||
public final List<String> files = new ArrayList<>();
|
||||
|
||||
public Arguments(String... args) {
|
||||
@@ -43,7 +53,7 @@ public class Arguments {
|
||||
String arg = args[i];
|
||||
String fieldName;
|
||||
if (arg.startsWith("--")) {
|
||||
fieldName = arg.substring(2);
|
||||
fieldName = toCamelCase(arg.substring(2));
|
||||
} else if (arg.startsWith("-") && arg.length() == 2) {
|
||||
fieldName = alias(arg.charAt(1));
|
||||
} else {
|
||||
@@ -65,7 +75,7 @@ public class Arguments {
|
||||
} else if (type == int.class) {
|
||||
f.setInt(this, Integer.parseInt(args[++i]));
|
||||
} else if (type == double.class) {
|
||||
f.setDouble(this, Double.parseDouble(args[++i]));
|
||||
f.setDouble(this, parseRatio(args[++i]));
|
||||
} else if (type == long.class) {
|
||||
f.setLong(this, parseTimestamp(args[++i]));
|
||||
} else if (type == Pattern.class) {
|
||||
@@ -85,6 +95,8 @@ public class Arguments {
|
||||
return "output";
|
||||
case 'r':
|
||||
return "reverse";
|
||||
case 'i':
|
||||
return "inverted";
|
||||
case 'I':
|
||||
return "include";
|
||||
case 'X':
|
||||
@@ -98,6 +110,21 @@ public class Arguments {
|
||||
}
|
||||
}
|
||||
|
||||
private static String toCamelCase(String name) {
|
||||
for (int i; (i = name.lastIndexOf('-', name.length() - 2)) >= 0; ) {
|
||||
name = name.substring(0, i) + Character.toUpperCase(name.charAt(i + 1)) + name.substring(i + 2);
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
// Absolute floating point value or percentage followed by %
|
||||
private static double parseRatio(String value) {
|
||||
if (value.endsWith("%")) {
|
||||
return Double.parseDouble(value.substring(0, value.length() - 1)) / 100;
|
||||
}
|
||||
return Double.parseDouble(value);
|
||||
}
|
||||
|
||||
// Milliseconds or HH:mm:ss.S or yyyy-MM-dd'T'HH:mm:ss.S
|
||||
private static long parseTimestamp(String time) {
|
||||
if (time.indexOf(':') < 0) {
|
||||
|
||||
39
src/converter/one/convert/BidirectionalIndex.java
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
public class BidirectionalIndex<T> extends Index<T> {
|
||||
private final ArrayList<T> reverseIndex;
|
||||
|
||||
public BidirectionalIndex(Class<T> cls, T empty) {
|
||||
this(cls, empty, 256);
|
||||
}
|
||||
|
||||
public BidirectionalIndex(Class<T> cls, T empty, int initialCapacity) {
|
||||
super(cls, empty, initialCapacity);
|
||||
this.reverseIndex = new ArrayList<>(initialCapacity);
|
||||
this.reverseIndex.add(empty);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int index(T key) {
|
||||
assert super.size() == reverseIndex.size();
|
||||
int idx = super.index(key);
|
||||
if (idx < reverseIndex.size()) {
|
||||
// Key already exists
|
||||
return idx;
|
||||
}
|
||||
assert idx == reverseIndex.size();
|
||||
reverseIndex.add(key);
|
||||
return idx;
|
||||
}
|
||||
|
||||
public T getKey(int idx) {
|
||||
return reverseIndex.get(idx);
|
||||
}
|
||||
}
|
||||
@@ -13,23 +13,31 @@ import java.util.StringTokenizer;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
import static one.convert.ResourceProcessor.*;
|
||||
|
||||
public class FlameGraph implements Comparator<Frame> {
|
||||
private static final Frame[] EMPTY_FRAME_ARRAY = {};
|
||||
private static final String[] FRAME_SUFFIX = {"_[0]", "_[j]", "_[i]", "", "", "_[k]", "_[1]"};
|
||||
private static final byte HAS_SUFFIX = (byte) 0x80;
|
||||
private static final int FLUSH_THRESHOLD = 15000;
|
||||
private static final long NEW_FRAME_DIFF = Long.MIN_VALUE;
|
||||
private static final Pattern TID_FRAME_PATTERN = Pattern.compile("\\[(.* )?tid=\\d+]");
|
||||
|
||||
private final Arguments args;
|
||||
private final Index<String> cpool = new Index<>(String.class, "");
|
||||
private final Frame root = new Frame(0, TYPE_NATIVE);
|
||||
private final StringBuilder outbuf = new StringBuilder(FLUSH_THRESHOLD + 1000);
|
||||
|
||||
private String title = "Flame Graph";
|
||||
private int[] order;
|
||||
private int[] cpoolMap;
|
||||
private int depth;
|
||||
private int lastLevel;
|
||||
private long lastX;
|
||||
private long lastTotal;
|
||||
private long lastDiff;
|
||||
private long mintotal;
|
||||
private long maxdiff = -1;
|
||||
|
||||
public FlameGraph(Arguments args) {
|
||||
this.args = args;
|
||||
@@ -69,7 +77,11 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
boolean needRebuild = args.reverse || args.include != null || args.exclude != null;
|
||||
|
||||
try (BufferedReader br = new BufferedReader(in)) {
|
||||
while (!br.readLine().startsWith("const cpool")) ;
|
||||
for (String line; !(line = br.readLine()).startsWith("const cpool"); ) {
|
||||
if (line.startsWith("<h1")) {
|
||||
title = line.substring(line.indexOf('>') + 1, line.lastIndexOf("</h1>"));
|
||||
}
|
||||
}
|
||||
br.readLine();
|
||||
|
||||
String s = "";
|
||||
@@ -82,6 +94,8 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
while (!br.readLine().isEmpty()) ;
|
||||
|
||||
for (String line; !(line = br.readLine()).isEmpty(); ) {
|
||||
if (line.startsWith("d=")) continue; // artifact of a differential flame graph
|
||||
|
||||
StringTokenizer st = new StringTokenizer(line.substring(2, line.length() - 1), ",");
|
||||
int nameAndType = Integer.parseInt(st.nextToken());
|
||||
|
||||
@@ -101,12 +115,10 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
|
||||
int titleIndex = nameAndType >>> 3;
|
||||
byte type = (byte) (nameAndType & 7);
|
||||
if (st.hasMoreTokens() && (type <= TYPE_INLINED || type >= TYPE_C1_COMPILED)) {
|
||||
type = TYPE_JIT_COMPILED;
|
||||
}
|
||||
byte normalizedType = type <= TYPE_INLINED || type >= TYPE_C1_COMPILED ? TYPE_JIT_COMPILED : type;
|
||||
|
||||
Frame f = level > 0 || needRebuild ? new Frame(titleIndex, type) : root;
|
||||
f.self = f.total = total;
|
||||
Frame f = level > 0 || needRebuild ? new Frame(titleIndex, normalizedType) : root;
|
||||
fillFrameCounters(f, type, total);
|
||||
if (st.hasMoreTokens()) f.inlined = Long.parseLong(st.nextToken());
|
||||
if (st.hasMoreTokens()) f.c1 = Long.parseLong(st.nextToken());
|
||||
if (st.hasMoreTokens()) f.interpreted = Long.parseLong(st.nextToken());
|
||||
@@ -149,7 +161,13 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
|
||||
Frame frame = root;
|
||||
if (args.reverse) {
|
||||
for (int i = stack.size; --i >= args.skip; ) {
|
||||
// Retain by-thread grouping, unless thread frame is skipped
|
||||
int skip = args.skip;
|
||||
if (skip == 0 && stack.size > 0 && isThreadFrame(stack.names[0], stack.types[0])) {
|
||||
frame = addChild(frame, stack.names[0], stack.types[0], ticks);
|
||||
skip = 1;
|
||||
}
|
||||
for (int i = stack.size; --i >= skip; ) {
|
||||
frame = addChild(frame, stack.names[i], stack.types[i], ticks);
|
||||
}
|
||||
} else {
|
||||
@@ -163,6 +181,32 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
depth = Math.max(depth, stack.size);
|
||||
}
|
||||
|
||||
public void diff(FlameGraph base) {
|
||||
// Build a map that translates this cpool keys to the base flamegraph's cpool keys
|
||||
cpoolMap = Arrays.stream(cpool.keys()).mapToInt(title -> base.cpool.getOrDefault(title, -1)).toArray();
|
||||
diff(base.root, root);
|
||||
}
|
||||
|
||||
private void diff(Frame base, Frame current) {
|
||||
current.diff = base == null ? NEW_FRAME_DIFF : current.self - base.self;
|
||||
maxdiff = Math.max(maxdiff, Math.abs(current.diff));
|
||||
|
||||
for (Frame child : current.values()) {
|
||||
Frame baseChild = base == null ? null : base.get(translateKey(child.key));
|
||||
diff(baseChild, child);
|
||||
}
|
||||
}
|
||||
|
||||
private int translateKey(int key) {
|
||||
return cpoolMap[key & TITLE_MASK] | (key & ~TITLE_MASK);
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
try (PrintStream ps = new PrintStream(out, false, "UTF-8")) {
|
||||
dump(ps);
|
||||
}
|
||||
}
|
||||
|
||||
public void dump(PrintStream out) {
|
||||
mintotal = (long) (root.total * args.minwidth / 100);
|
||||
|
||||
@@ -178,10 +222,15 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
out.print(Math.min(depth * 16, 32767));
|
||||
|
||||
tail = printTill(out, tail, "/*title:*/");
|
||||
out.print(args.title);
|
||||
out.print(args.title != null ? args.title : title);
|
||||
|
||||
tail = printTill(out, tail, "/*reverse:*/false");
|
||||
out.print(args.reverse);
|
||||
// inverted toggles the layout for reversed stacktraces from icicle to flamegraph
|
||||
// and for default stacktraces from flamegraphs to icicle.
|
||||
tail = printTill(out, tail, "/*inverted:*/false");
|
||||
out.print(args.reverse ^ args.inverted);
|
||||
|
||||
tail = printTill(out, tail, "/*maxdiff:*/-1");
|
||||
out.print(maxdiff);
|
||||
|
||||
tail = printTill(out, tail, "/*depth:*/0");
|
||||
out.print(depth);
|
||||
@@ -199,12 +248,6 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
out.print(tail);
|
||||
}
|
||||
|
||||
private String printTill(PrintStream out, String data, String till) {
|
||||
int index = data.indexOf(till);
|
||||
out.print(data.substring(0, index));
|
||||
return data.substring(index + till.length());
|
||||
}
|
||||
|
||||
private void printCpool(PrintStream out) {
|
||||
String[] strings = cpool.keys();
|
||||
Arrays.sort(strings);
|
||||
@@ -223,6 +266,15 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
}
|
||||
|
||||
private void printFrame(PrintStream out, Frame frame, int level, long x) {
|
||||
StringBuilder sb = outbuf;
|
||||
if (frame.diff != lastDiff) {
|
||||
if (frame.diff == NEW_FRAME_DIFF) {
|
||||
sb.append("d=U\n");
|
||||
} else {
|
||||
sb.append("d=").append(frame.diff).append('\n');
|
||||
}
|
||||
}
|
||||
|
||||
int nameAndType = order[frame.getTitleIndex()] << 3 | frame.getType();
|
||||
boolean hasExtraTypes = (frame.inlined | frame.c1 | frame.interpreted) != 0 &&
|
||||
frame.inlined < frame.total && frame.interpreted < frame.total;
|
||||
@@ -234,7 +286,7 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
func = 'n';
|
||||
}
|
||||
|
||||
StringBuilder sb = outbuf.append(func).append('(').append(nameAndType);
|
||||
sb.append(func).append('(').append(nameAndType);
|
||||
if (func == 'f') {
|
||||
sb.append(',').append(level).append(',').append(x - lastX);
|
||||
}
|
||||
@@ -254,6 +306,7 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
lastLevel = level;
|
||||
lastX = x;
|
||||
lastTotal = frame.total;
|
||||
lastDiff = frame.diff;
|
||||
|
||||
Frame[] children = frame.values().toArray(EMPTY_FRAME_ARRAY);
|
||||
Arrays.sort(children, this);
|
||||
@@ -275,6 +328,9 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
sb.append(strings[frame.getTitleIndex()]).append(FRAME_SUFFIX[frame.getType()]);
|
||||
if (frame.self > 0) {
|
||||
int tmpLength = sb.length();
|
||||
if (maxdiff >= 0) {
|
||||
sb.append(' ').append(frame.diff == NEW_FRAME_DIFF ? 0 : frame.self - frame.diff);
|
||||
}
|
||||
out.print(sb.append(' ').append(frame.self).append('\n'));
|
||||
sb.setLength(tmpLength);
|
||||
}
|
||||
@@ -312,6 +368,21 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
return include != null;
|
||||
}
|
||||
|
||||
private static void fillFrameCounters(Frame frame, byte type, long ticks) {
|
||||
frame.self = frame.total = ticks;
|
||||
switch (type) {
|
||||
case TYPE_INTERPRETED:
|
||||
frame.interpreted = ticks;
|
||||
break;
|
||||
case TYPE_INLINED:
|
||||
frame.inlined = ticks;
|
||||
break;
|
||||
case TYPE_C1_COMPILED:
|
||||
frame.c1 = ticks;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private Frame addChild(Frame frame, String title, byte type, long ticks) {
|
||||
frame.total += ticks;
|
||||
|
||||
@@ -355,6 +426,10 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean isThreadFrame(String name, byte type) {
|
||||
return type == TYPE_NATIVE && name.startsWith("[") && TID_FRAME_PATTERN.matcher(name).matches();
|
||||
}
|
||||
|
||||
private static int getCommonPrefix(String a, String b) {
|
||||
int length = Math.min(a.length(), b.length());
|
||||
for (int i = 0; i < length; i++) {
|
||||
@@ -377,29 +452,12 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
return s;
|
||||
}
|
||||
|
||||
private static String getResource(String name) {
|
||||
try (InputStream stream = FlameGraph.class.getResourceAsStream(name)) {
|
||||
if (stream == null) {
|
||||
throw new IOException("No resource found");
|
||||
}
|
||||
|
||||
ByteArrayOutputStream result = new ByteArrayOutputStream();
|
||||
byte[] buffer = new byte[32768];
|
||||
for (int length; (length = stream.read(buffer)) != -1; ) {
|
||||
result.write(buffer, 0, length);
|
||||
}
|
||||
return result.toString("UTF-8");
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Can't load resource with name " + name);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compare(Frame f1, Frame f2) {
|
||||
return order[f1.getTitleIndex()] - order[f2.getTitleIndex()];
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
public static FlameGraph parse(String input, Arguments args) throws IOException {
|
||||
FlameGraph fg = new FlameGraph(args);
|
||||
try (InputStreamReader in = new InputStreamReader(new FileInputStream(input), StandardCharsets.UTF_8)) {
|
||||
if (input.endsWith(".html")) {
|
||||
@@ -408,6 +466,11 @@ public class FlameGraph implements Comparator<Frame> {
|
||||
fg.parseCollapsed(in);
|
||||
}
|
||||
}
|
||||
return fg;
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
FlameGraph fg = parse(input, args);
|
||||
try (PrintStream out = new PrintStream(output, "UTF-8")) {
|
||||
fg.dump(out);
|
||||
}
|
||||
|
||||
@@ -16,11 +16,13 @@ public class Frame extends HashMap<Integer, Frame> {
|
||||
public static final byte TYPE_KERNEL = 5;
|
||||
public static final byte TYPE_C1_COMPILED = 6;
|
||||
|
||||
private static final int TYPE_SHIFT = 28;
|
||||
static final int TYPE_SHIFT = 28;
|
||||
static final int TITLE_MASK = (1 << TYPE_SHIFT) - 1;
|
||||
|
||||
final int key;
|
||||
long total;
|
||||
long self;
|
||||
long diff;
|
||||
long inlined, c1, interpreted;
|
||||
|
||||
private Frame(int key) {
|
||||
@@ -36,7 +38,7 @@ public class Frame extends HashMap<Integer, Frame> {
|
||||
}
|
||||
|
||||
int getTitleIndex() {
|
||||
return key & ((1 << TYPE_SHIFT) - 1);
|
||||
return key & TITLE_MASK;
|
||||
}
|
||||
|
||||
byte getType() {
|
||||
|
||||
@@ -8,10 +8,25 @@ package one.convert;
|
||||
import java.lang.reflect.Array;
|
||||
import java.util.HashMap;
|
||||
|
||||
/**
|
||||
* Container which records the index of appearance of the value it holds.
|
||||
* <p>
|
||||
* Allows retrieving the index of a given object in constant time, as well as
|
||||
* an ordered list of all values seen.
|
||||
* <p>
|
||||
* The object at index 0 is always the empty object.
|
||||
*
|
||||
* @param <T> type of the objects held in the container.
|
||||
*/
|
||||
public class Index<T> extends HashMap<T, Integer> {
|
||||
private final Class<T> cls;
|
||||
|
||||
public Index(Class<T> cls, T empty) {
|
||||
this(cls, empty, 256);
|
||||
}
|
||||
|
||||
public Index(Class<T> cls, T empty, int initialCapacity) {
|
||||
super(initialCapacity);
|
||||
this.cls = cls;
|
||||
super.put(empty, 0);
|
||||
}
|
||||
|
||||
@@ -5,56 +5,103 @@
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.jfr.ClassRef;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.MethodRef;
|
||||
import one.jfr.*;
|
||||
import one.jfr.event.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.BitSet;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static one.convert.Frame.*;
|
||||
|
||||
public abstract class JfrConverter extends Classifier {
|
||||
protected final JfrReader jfr;
|
||||
protected final Arguments args;
|
||||
protected final EventCollector collector;
|
||||
protected Dictionary<String> methodNames;
|
||||
|
||||
public JfrConverter(JfrReader jfr, Arguments args) {
|
||||
this.jfr = jfr;
|
||||
this.args = args;
|
||||
|
||||
EventCollector collector = createCollector(args);
|
||||
this.collector = args.nativemem && args.leak ? new MallocLeakAggregator(collector, args.tail) : collector;
|
||||
}
|
||||
|
||||
public void convert() throws IOException {
|
||||
TimeIntervals timeIntervals = readLatencyTimeIntervals();
|
||||
|
||||
jfr.stopAtNewChunk = true;
|
||||
while (jfr.hasMoreChunks()) {
|
||||
// Reset method dictionary, since new chunk may have different IDs
|
||||
methodNames = new Dictionary<>();
|
||||
|
||||
collector.beforeChunk();
|
||||
collectEvents(timeIntervals);
|
||||
collector.afterChunk();
|
||||
|
||||
convertChunk();
|
||||
}
|
||||
|
||||
if (collector.finish()) {
|
||||
convertChunk();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void convertChunk() throws IOException;
|
||||
protected final TimeIntervals readLatencyTimeIntervals() throws IOException {
|
||||
if (args.latency < 0) return null;
|
||||
|
||||
protected EventAggregator collectEvents() throws IOException {
|
||||
EventAggregator agg = new EventAggregator(args.threads, args.total);
|
||||
TimeIntervals.Builder intervalsBuilder = new TimeIntervals.Builder();
|
||||
boolean foundMethodTrace = false; // We'll throw an exception if none is found
|
||||
|
||||
Class<? extends Event> eventClass =
|
||||
args.live ? LiveObject.class :
|
||||
args.alloc ? AllocationSample.class :
|
||||
args.lock ? ContendedLock.class : ExecutionSample.class;
|
||||
jfr.stopAtNewChunk = true;
|
||||
while (jfr.hasMoreChunks()) {
|
||||
long minLatencyTicks = args.latency * jfr.ticksPerSec / 1000;
|
||||
MethodTrace event;
|
||||
while ((event = jfr.readEvent(MethodTrace.class)) != null) {
|
||||
foundMethodTrace = true;
|
||||
if (event.duration >= minLatencyTicks) {
|
||||
intervalsBuilder.add(jfr.eventTimeToNanos(event.time), jfr.eventTimeToNanos(event.time + event.duration));
|
||||
}
|
||||
}
|
||||
}
|
||||
jfr.rewind();
|
||||
|
||||
long threadStates = 0;
|
||||
if (!foundMethodTrace) {
|
||||
throw new RuntimeException("No jdk.MethodTrace events found");
|
||||
}
|
||||
return intervalsBuilder.build();
|
||||
}
|
||||
|
||||
protected EventCollector createCollector(Arguments args) {
|
||||
return new EventAggregator(args.threads, args.grain);
|
||||
}
|
||||
|
||||
protected void collectEvents(TimeIntervals timeIntervals) throws IOException {
|
||||
// args.nativemem ? MallocEvent.class should always be first for the leak detection feature
|
||||
Class<? extends Event> eventClass = args.nativemem ? MallocEvent.class
|
||||
: args.nativelock ? NativeLockEvent.class
|
||||
: args.live ? LiveObject.class
|
||||
: args.alloc ? AllocationSample.class
|
||||
: args.lock ? ContendedLock.class
|
||||
: args.trace ? MethodTrace.class
|
||||
: ExecutionSample.class;
|
||||
|
||||
BitSet threadStates = null;
|
||||
if (args.state != null) {
|
||||
threadStates = new BitSet();
|
||||
for (String state : args.state.toUpperCase().split(",")) {
|
||||
threadStates |= 1L << toThreadState(state);
|
||||
threadStates.set(toThreadState(state));
|
||||
}
|
||||
} else if (args.cpu) {
|
||||
threadStates = 1L << toThreadState("DEFAULT");
|
||||
threadStates = getThreadStates(true);
|
||||
} else if (args.wall) {
|
||||
threadStates = ~(1L << toThreadState("DEFAULT"));
|
||||
threadStates = getThreadStates(false);
|
||||
} else if (args.cpuTime) {
|
||||
threadStates = new BitSet();
|
||||
threadStates.set(ExecutionSample.CPU_TIME_SAMPLE);
|
||||
}
|
||||
|
||||
long startTicks = args.from != 0 ? toTicks(args.from) : Long.MIN_VALUE;
|
||||
@@ -62,13 +109,60 @@ public abstract class JfrConverter extends Classifier {
|
||||
|
||||
for (Event event; (event = jfr.readEvent(eventClass)) != null; ) {
|
||||
if (event.time >= startTicks && event.time <= endTicks) {
|
||||
if (threadStates == 0 || (threadStates & (1L << ((ExecutionSample) event).threadState)) != 0) {
|
||||
agg.collect(event);
|
||||
if (threadStates == null || threadStates.get(((ExecutionSample) event).threadState)) {
|
||||
if (timeIntervals == null || timeIntervals.contains(jfr.eventTimeToNanos(event.time))) {
|
||||
collector.collect(event);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return agg;
|
||||
protected void convertChunk() {
|
||||
// To be overridden in subclasses
|
||||
}
|
||||
|
||||
protected boolean excludeStack(int stackId, int threadId, long classId) {
|
||||
Pattern include = args.include;
|
||||
Pattern exclude = args.exclude;
|
||||
if (include == null && exclude == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (args.threads) {
|
||||
String threadName = getThreadName(threadId);
|
||||
if (exclude != null && exclude.matcher(threadName).matches()) {
|
||||
return true;
|
||||
}
|
||||
if (include != null && include.matcher(threadName).matches()) {
|
||||
if (exclude == null) return false;
|
||||
include = null;
|
||||
}
|
||||
}
|
||||
|
||||
if (classId != 0) {
|
||||
String className = getClassName(classId);
|
||||
if (exclude != null && exclude.matcher(className).matches()) {
|
||||
return true;
|
||||
}
|
||||
if (include != null && include.matcher(className).matches()) {
|
||||
if (exclude == null) return false;
|
||||
include = null;
|
||||
}
|
||||
}
|
||||
|
||||
StackTrace stackTrace = jfr.stackTraces.get(stackId);
|
||||
for (int i = 0; i < stackTrace.methods.length; i++) {
|
||||
String name = getMethodName(stackTrace.methods[i], stackTrace.types[i]);
|
||||
if (exclude != null && exclude.matcher(name).matches()) {
|
||||
return true;
|
||||
}
|
||||
if (include != null && include.matcher(name).matches()) {
|
||||
if (exclude == null) return false;
|
||||
include = null;
|
||||
}
|
||||
}
|
||||
return include != null;
|
||||
}
|
||||
|
||||
protected int toThreadState(String name) {
|
||||
@@ -83,6 +177,17 @@ public abstract class JfrConverter extends Classifier {
|
||||
throw new IllegalArgumentException("Unknown thread state: " + name);
|
||||
}
|
||||
|
||||
protected BitSet getThreadStates(boolean cpu) {
|
||||
BitSet set = new BitSet();
|
||||
Map<Integer, String> threadStates = jfr.enums.get("jdk.types.ThreadState");
|
||||
if (threadStates != null) {
|
||||
for (Map.Entry<Integer, String> entry : threadStates.entrySet()) {
|
||||
set.set(entry.getKey(), "STATE_DEFAULT".equals(entry.getValue()) == cpu);
|
||||
}
|
||||
}
|
||||
return set;
|
||||
}
|
||||
|
||||
// millis can be an absolute timestamp or an offset from the beginning/end of the recording
|
||||
protected long toTicks(long millis) {
|
||||
long nanos = millis * 1_000_000;
|
||||
@@ -95,7 +200,7 @@ public abstract class JfrConverter extends Classifier {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getMethodName(long methodId, byte methodType) {
|
||||
public String getMethodName(long methodId, byte methodType) {
|
||||
String result = methodNames.get(methodId);
|
||||
if (result == null) {
|
||||
methodNames.put(methodId, result = resolveMethodName(methodId, methodType));
|
||||
@@ -125,7 +230,7 @@ public abstract class JfrConverter extends Classifier {
|
||||
}
|
||||
}
|
||||
|
||||
protected String getClassName(long classId) {
|
||||
public String getClassName(long classId) {
|
||||
ClassRef cls = jfr.classes.get(classId);
|
||||
if (cls == null) {
|
||||
return "null";
|
||||
@@ -144,13 +249,7 @@ public abstract class JfrConverter extends Classifier {
|
||||
return name;
|
||||
}
|
||||
|
||||
protected String getThreadName(int tid) {
|
||||
String threadName = jfr.threads.get(tid);
|
||||
return threadName == null ? "[tid=" + tid + ']' :
|
||||
threadName.startsWith("[tid=") ? threadName : '[' + threadName + " tid=" + tid + ']';
|
||||
}
|
||||
|
||||
protected String toJavaClassName(byte[] symbol, int start, boolean dotted) {
|
||||
private String toJavaClassName(byte[] symbol, int start, boolean dotted) {
|
||||
int end = symbol.length;
|
||||
if (start > 0) {
|
||||
switch (symbol[start]) {
|
||||
@@ -205,6 +304,29 @@ public abstract class JfrConverter extends Classifier {
|
||||
return dotted ? s.replace('/', '.') : s;
|
||||
}
|
||||
|
||||
public StackTraceElement getStackTraceElement(long methodId, byte methodType, int location) {
|
||||
MethodRef method = jfr.methods.get(methodId);
|
||||
if (method == null) {
|
||||
return new StackTraceElement("", "unknown", null, 0);
|
||||
}
|
||||
|
||||
ClassRef cls = jfr.classes.get(method.cls);
|
||||
byte[] className = jfr.symbols.get(cls.name);
|
||||
byte[] methodName = jfr.symbols.get(method.name);
|
||||
|
||||
String classStr = className == null || className.length == 0 || isNativeFrame(methodType) ? "" :
|
||||
toJavaClassName(className, 0, args.dot);
|
||||
String methodStr = methodName == null || methodName.length == 0 ? "" :
|
||||
new String(methodName, StandardCharsets.UTF_8);
|
||||
return new StackTraceElement(classStr, methodStr, null, location >>> 16);
|
||||
}
|
||||
|
||||
public String getThreadName(int tid) {
|
||||
String threadName = jfr.threads.get(tid);
|
||||
return threadName == null ? "[tid=" + tid + ']' :
|
||||
threadName.startsWith("[tid=") ? threadName : '[' + threadName + " tid=" + tid + ']';
|
||||
}
|
||||
|
||||
protected boolean isNativeFrame(byte methodType) {
|
||||
// In JDK Flight Recorder, TYPE_NATIVE denotes Java native methods,
|
||||
// while in async-profiler, TYPE_NATIVE is for C methods
|
||||
@@ -212,4 +334,37 @@ public abstract class JfrConverter extends Classifier {
|
||||
methodType == TYPE_CPP ||
|
||||
methodType == TYPE_KERNEL;
|
||||
}
|
||||
|
||||
public String getValueType() {
|
||||
if (args.nativemem) return "malloc";
|
||||
if (args.alloc || args.live) return "allocations";
|
||||
if (args.lock) return "locks";
|
||||
return "cpu";
|
||||
}
|
||||
|
||||
public String getSampleUnits() {
|
||||
return "count";
|
||||
}
|
||||
|
||||
public String getTotalUnits() {
|
||||
if (args.nativemem || args.alloc || args.live) return "bytes";
|
||||
return "nanoseconds";
|
||||
}
|
||||
|
||||
public double counterFactor() {
|
||||
return (args.lock || args.nativelock) ? jfr.nanosPerTick : 1.0;
|
||||
}
|
||||
|
||||
// Select sum(samples) or sum(value) depending on the --total option.
|
||||
// For lock and nativelock events, convert lock duration from ticks to nanoseconds.
|
||||
protected abstract class AggregatedEventVisitor implements EventCollector.Visitor {
|
||||
private final double factor = !args.total ? 0.0 : counterFactor();
|
||||
|
||||
@Override
|
||||
public final void visit(Event event, long samples, long value) {
|
||||
visit(event, factor == 0.0 ? samples : factor == 1.0 ? value : (long) (value * factor));
|
||||
}
|
||||
|
||||
protected abstract void visit(Event event, long value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,9 +9,7 @@ import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.AllocationSample;
|
||||
import one.jfr.event.Event;
|
||||
import one.jfr.event.EventAggregator;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.PrintStream;
|
||||
@@ -30,11 +28,9 @@ public class JfrToFlame extends JfrConverter {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void convertChunk() throws IOException {
|
||||
collectEvents().forEach(new EventAggregator.Visitor() {
|
||||
protected void convertChunk() {
|
||||
collector.forEach(new AggregatedEventVisitor() {
|
||||
final CallStack stack = new CallStack();
|
||||
final double ticksToNanos = 1e9 / jfr.ticksPerSec;
|
||||
final boolean scale = args.total && args.lock && ticksToNanos != 1.0;
|
||||
|
||||
@Override
|
||||
public void visit(Event event, long value) {
|
||||
@@ -68,7 +64,7 @@ public class JfrToFlame extends JfrConverter {
|
||||
&& ((AllocationSample) event).tlabSize == 0 ? TYPE_KERNEL : TYPE_INLINED);
|
||||
}
|
||||
|
||||
fg.addSample(stack, scale ? (long) (value * ticksToNanos) : value);
|
||||
fg.addSample(stack, value);
|
||||
stack.clear();
|
||||
}
|
||||
}
|
||||
@@ -76,19 +72,21 @@ public class JfrToFlame extends JfrConverter {
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
try (PrintStream ps = new PrintStream(out, false, "UTF-8")) {
|
||||
fg.dump(ps);
|
||||
fg.dump(out);
|
||||
}
|
||||
|
||||
public static FlameGraph parse(String input, Arguments args) throws IOException {
|
||||
try (JfrReader jfr = new JfrReader(input)) {
|
||||
JfrToFlame converter = new JfrToFlame(jfr, args);
|
||||
converter.convert();
|
||||
return converter.fg;
|
||||
}
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
JfrToFlame converter;
|
||||
try (JfrReader jfr = new JfrReader(input)) {
|
||||
converter = new JfrToFlame(jfr, args);
|
||||
converter.convert();
|
||||
}
|
||||
try (FileOutputStream out = new FileOutputStream(output)) {
|
||||
converter.dump(out);
|
||||
FlameGraph fg = parse(input, args);
|
||||
try (PrintStream out = new PrintStream(output, "UTF-8")) {
|
||||
fg.dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
112
src/converter/one/convert/JfrToHeatmap.java
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import one.heatmap.Heatmap;
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.AllocationSample;
|
||||
import one.jfr.event.ContendedLock;
|
||||
import one.jfr.event.Event;
|
||||
import one.jfr.event.EventCollector;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
import static one.convert.Frame.TYPE_INLINED;
|
||||
import static one.convert.Frame.TYPE_KERNEL;
|
||||
|
||||
public class JfrToHeatmap extends JfrConverter {
|
||||
private final Heatmap heatmap;
|
||||
|
||||
public JfrToHeatmap(JfrReader jfr, Arguments args) {
|
||||
super(jfr, args);
|
||||
this.heatmap = new Heatmap(args, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EventCollector createCollector(Arguments args) {
|
||||
return new EventCollector() {
|
||||
long wallInterval;
|
||||
|
||||
private long getWallInterval() {
|
||||
if (wallInterval == 0) {
|
||||
String wall = jfr.settings.get("wall");
|
||||
long interval = Long.parseLong(wall != null ? wall : jfr.settings.get("interval"));
|
||||
wallInterval = interval != 0 ? interval : 50_000_000;
|
||||
}
|
||||
return wallInterval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(Event event) {
|
||||
int classId = 0;
|
||||
byte type = 0;
|
||||
if (event instanceof AllocationSample) {
|
||||
classId = ((AllocationSample) event).classId;
|
||||
type = ((AllocationSample) event).tlabSize == 0 ? TYPE_KERNEL : TYPE_INLINED;
|
||||
} else if (event instanceof ContendedLock) {
|
||||
classId = ((ContendedLock) event).classId;
|
||||
type = TYPE_KERNEL;
|
||||
}
|
||||
|
||||
long timeNs = jfr.eventTimeToNanos(event.time);
|
||||
long samples = event.samples();
|
||||
while (true) {
|
||||
heatmap.addEvent(event.stackTraceId, event.tid, classId, type, timeNs / 1_000_000);
|
||||
if (--samples <= 0) break;
|
||||
// Only wall clock events can have samples > 1
|
||||
timeNs += getWallInterval();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeChunk() {
|
||||
heatmap.beforeChunk();
|
||||
jfr.stackTraces.forEach(new Dictionary.Visitor<StackTrace>() {
|
||||
@Override
|
||||
public void visit(long key, StackTrace trace) {
|
||||
heatmap.addStack(key, trace.methods, trace.locations, trace.types, trace.methods.length);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterChunk() {
|
||||
jfr.stackTraces.clear();
|
||||
wallInterval = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean finish() {
|
||||
heatmap.finish(jfr.startNanos / 1_000_000);
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEach(Visitor visitor) {
|
||||
throw new AssertionError("Should not be called");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
try (PrintStream ps = new PrintStream(out, false, "UTF-8")) {
|
||||
heatmap.dump(ps);
|
||||
}
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
JfrToHeatmap converter;
|
||||
try (JfrReader jfr = new JfrReader(input)) {
|
||||
converter = new JfrToHeatmap(jfr, args);
|
||||
converter.convert();
|
||||
}
|
||||
try (OutputStream out = new BufferedOutputStream(new FileOutputStream(output))) {
|
||||
converter.dump(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
334
src/converter/one/convert/JfrToOtlp.java
Normal file
@@ -0,0 +1,334 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import static one.convert.OtlpConstants.*;
|
||||
|
||||
import one.jfr.Dictionary;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.*;
|
||||
import one.proto.Proto;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* Converts .jfr output to OpenTelemetry protocol.
|
||||
*/
|
||||
public class JfrToOtlp extends JfrConverter {
|
||||
// Size in bytes to be allocated in the buffer to hold the varint containing the length of the message
|
||||
private static final int MSG_LARGE = 5;
|
||||
private static final int MSG_SMALL = 1;
|
||||
|
||||
private final Index<String> stringPool = new Index<>(String.class, "");
|
||||
private final Index<String> functionPool = new Index<>(String.class, "");
|
||||
private final Index<Line> linePool = new Index<>(Line.class, Line.EMPTY);
|
||||
private final Index<KeyValue> attributesPool = new Index<>(KeyValue.class, KeyValue.EMPTY);
|
||||
private final Index<IntArray> stacksPool = new Index<>(IntArray.class, IntArray.EMPTY);
|
||||
private final int threadNameIndex = stringPool.index(OTLP_THREAD_NAME);
|
||||
|
||||
private final Dictionary<AggregatedEvent> aggregatedEvents = new Dictionary<>();
|
||||
// Chunk-private cache to remember mappings from stacktrace ID to OTLP stack index
|
||||
private final Map<Integer, Integer> stacksIndexCache = new HashMap<>();
|
||||
private double chunkCounterFactor;
|
||||
|
||||
private final Proto proto = new Proto(1024);
|
||||
|
||||
public JfrToOtlp(JfrReader jfr, Arguments args) {
|
||||
super(jfr, args);
|
||||
}
|
||||
|
||||
public void dump(OutputStream out) throws IOException {
|
||||
out.write(proto.buffer(), 0, proto.size());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EventCollector createCollector(Arguments args) {
|
||||
return new EventCollector() {
|
||||
public void beforeChunk() {
|
||||
chunkCounterFactor = counterFactor();
|
||||
aggregatedEvents.clear();
|
||||
stacksIndexCache.clear();
|
||||
}
|
||||
|
||||
public void collect(Event e) {
|
||||
if (excludeStack(e.stackTraceId, e.tid, 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
long key = ((long) e.tid) << 32 | e.stackTraceId;
|
||||
AggregatedEvent ec = aggregatedEvents.get(key);
|
||||
if (ec == null) {
|
||||
ec = new AggregatedEvent();
|
||||
aggregatedEvents.put(key, ec);
|
||||
}
|
||||
|
||||
long recordedValue = !args.total ? e.samples() : chunkCounterFactor == 1.0 ? e.value() : (long) (e.value() * chunkCounterFactor);
|
||||
ec.recordEvent(getUnixTimestampNanos(e.time), recordedValue);
|
||||
}
|
||||
|
||||
private long getUnixTimestampNanos(long jfrTimestamp) {
|
||||
long nanosFromStart = (long) ((jfrTimestamp - jfr.chunkStartTicks) * jfr.nanosPerTick);
|
||||
return jfr.chunkStartNanos + nanosFromStart;
|
||||
}
|
||||
|
||||
public void afterChunk() {}
|
||||
|
||||
public boolean finish() {
|
||||
aggregatedEvents.clear();
|
||||
stacksIndexCache.clear();
|
||||
return false;
|
||||
}
|
||||
|
||||
public void forEach(Visitor visitor) {
|
||||
throw new UnsupportedOperationException("Not supported");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public void convert() throws IOException {
|
||||
long rpMark = proto.startField(PROFILES_DATA_resource_profiles, MSG_LARGE);
|
||||
long spMark = proto.startField(RESOURCE_PROFILES_scope_profiles, MSG_LARGE);
|
||||
super.convert();
|
||||
proto.commitField(spMark);
|
||||
proto.commitField(rpMark);
|
||||
|
||||
writeProfileDictionary();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void convertChunk() {
|
||||
long pMark = proto.startField(SCOPE_PROFILES_profiles, MSG_LARGE);
|
||||
|
||||
long sttMark = proto.startField(PROFILE_sample_type, MSG_SMALL);
|
||||
proto.field(VALUE_TYPE_type_strindex, stringPool.index(getValueType()));
|
||||
proto.field(VALUE_TYPE_unit_strindex,
|
||||
stringPool.index(args.total ? getTotalUnits() : getSampleUnits()));
|
||||
proto.commitField(sttMark);
|
||||
|
||||
proto.fieldFixed64(PROFILE_time_unix_nano, jfr.chunkStartNanos);
|
||||
proto.field(PROFILE_duration_nanos, jfr.chunkDurationNanos());
|
||||
|
||||
aggregatedEvents.forEach((key, value) -> {
|
||||
int stackTraceId = (int) key;
|
||||
int tid = (int) (key >> 32);
|
||||
writeSample(stackTraceId, tid, value);
|
||||
});
|
||||
|
||||
proto.commitField(pMark);
|
||||
}
|
||||
|
||||
private IntArray makeStack(int stackTraceId) {
|
||||
StackTrace st = jfr.stackTraces.get(stackTraceId);
|
||||
int[] stack = new int[st.methods.length];
|
||||
for (int i = 0; i < st.methods.length; ++i) {
|
||||
stack[i] = linePool.index(makeLine(st, i));
|
||||
}
|
||||
return new IntArray(stack);
|
||||
}
|
||||
|
||||
private Line makeLine(StackTrace stackTrace, int i) {
|
||||
String methodName = getMethodName(stackTrace.methods[i], stackTrace.types[i]);
|
||||
int lineNumber = stackTrace.locations[i] >>> 16;
|
||||
int functionIdx = functionPool.index(methodName);
|
||||
return new Line(functionIdx, lineNumber);
|
||||
}
|
||||
|
||||
private void writeSample(int stackTraceId, int tid, AggregatedEvent ae) {
|
||||
// 24 is the sum of:
|
||||
// 4 tags: 1 byte
|
||||
// 5 * 2: max size of thread name and stack idx
|
||||
// 5 * 2: max size of timestamps/values arrays
|
||||
int maxLengthBytes = varintSize(24 + ae.eventsCount * (8 /* fixed64 */ + 10 /* max varint */));
|
||||
long sMark = proto.startField(PROFILE_samples, maxLengthBytes);
|
||||
|
||||
proto.field(SAMPLE_stack_index, stacksIndexCache.computeIfAbsent(stackTraceId, key -> stacksPool.index(makeStack(key))));
|
||||
|
||||
String threadName = getThreadName(tid);
|
||||
KeyValue threadNameKv = new KeyValue(threadNameIndex, threadName);
|
||||
proto.field(SAMPLE_attribute_indices, attributesPool.index(threadNameKv));
|
||||
|
||||
long tMark = proto.startField(SAMPLE_timestamps_unix_nano, varintSize(8 * ae.eventsCount));
|
||||
for (int i = 0; i < ae.eventsCount; ++i) {
|
||||
proto.writeFixed64(ae.timestamps[i]);
|
||||
}
|
||||
proto.commitField(tMark);
|
||||
|
||||
long vMark = proto.startField(SAMPLE_values, varintSize(10 * ae.eventsCount));
|
||||
for (int i = 0; i < ae.eventsCount; ++i) {
|
||||
proto.writeLong(ae.values[i]);
|
||||
}
|
||||
proto.commitField(vMark);
|
||||
|
||||
proto.commitField(sMark);
|
||||
}
|
||||
|
||||
private static int varintSize(long value) {
|
||||
return (640 - Long.numberOfLeadingZeros(value | 1) * 9) / 64;
|
||||
}
|
||||
|
||||
private void writeProfileDictionary() {
|
||||
long profilesDictionaryMark = proto.startField(PROFILES_DATA_dictionary, MSG_LARGE);
|
||||
|
||||
// Mapping[0] must be a default mapping according to the spec
|
||||
long mMark = proto.startField(PROFILES_DICTIONARY_mapping_table, MSG_SMALL);
|
||||
proto.commitField(mMark);
|
||||
|
||||
for (String name : functionPool.keys()) {
|
||||
long fMark = proto.startField(PROFILES_DICTIONARY_function_table, MSG_SMALL);
|
||||
proto.field(FUNCTION_name_strindex, stringPool.index(name));
|
||||
proto.commitField(fMark);
|
||||
}
|
||||
|
||||
for (Line line : linePool.keys()) {
|
||||
long locMark = proto.startField(PROFILES_DICTIONARY_location_table, MSG_SMALL);
|
||||
proto.field(LOCATION_mapping_index, 0);
|
||||
|
||||
long lineMark = proto.startField(LOCATION_line, MSG_SMALL);
|
||||
proto.field(LINE_function_index, line.functionIdx);
|
||||
proto.field(LINE_lines, line.lineNumber);
|
||||
proto.commitField(lineMark);
|
||||
|
||||
proto.commitField(locMark);
|
||||
}
|
||||
|
||||
for (IntArray stack : stacksPool.keys()) {
|
||||
long stackMark = proto.startField(PROFILES_DICTIONARY_stack_table, MSG_LARGE);
|
||||
long locationIndicesMark = proto.startField(STACK_location_indices, MSG_LARGE);
|
||||
for (int locationIdx : stack.array) {
|
||||
proto.writeInt(locationIdx);
|
||||
}
|
||||
proto.commitField(locationIndicesMark);
|
||||
proto.commitField(stackMark);
|
||||
}
|
||||
|
||||
for (String s : stringPool.keys()) {
|
||||
proto.field(PROFILES_DICTIONARY_string_table, s);
|
||||
}
|
||||
|
||||
for (KeyValue kv : attributesPool.keys()) {
|
||||
long aMark = proto.startField(PROFILES_DICTIONARY_attribute_table, MSG_LARGE);
|
||||
proto.field(KEY_VALUE_AND_UNIT_key_strindex, kv.keyStrindex);
|
||||
|
||||
long vMark = proto.startField(KEY_VALUE_AND_UNIT_value, MSG_LARGE);
|
||||
proto.field(ANY_VALUE_string_value, kv.value);
|
||||
proto.commitField(vMark);
|
||||
|
||||
proto.commitField(aMark);
|
||||
}
|
||||
|
||||
proto.commitField(profilesDictionaryMark);
|
||||
}
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
JfrToOtlp converter;
|
||||
try (JfrReader jfr = new JfrReader(input)) {
|
||||
converter = new JfrToOtlp(jfr, args);
|
||||
converter.convert();
|
||||
}
|
||||
try (FileOutputStream out = new FileOutputStream(output)) {
|
||||
converter.dump(out);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class Line {
|
||||
static final Line EMPTY = new Line(0, 0);
|
||||
|
||||
final int functionIdx;
|
||||
final int lineNumber;
|
||||
|
||||
Line(int functionIdx, int lineNumber) {
|
||||
this.functionIdx = functionIdx;
|
||||
this.lineNumber = lineNumber;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (!(o instanceof Line)) return false;
|
||||
|
||||
Line other = (Line) o;
|
||||
return functionIdx == other.functionIdx && lineNumber == other.lineNumber;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = 17;
|
||||
result = 31 * result + functionIdx;
|
||||
return 31 * result + lineNumber;
|
||||
}
|
||||
}
|
||||
|
||||
private static final class KeyValue {
|
||||
static final KeyValue EMPTY = new KeyValue(0, "");
|
||||
|
||||
final int keyStrindex;
|
||||
// Only string value is fine for now
|
||||
final String value;
|
||||
|
||||
KeyValue(int keyStrindex, String value) {
|
||||
this.keyStrindex = keyStrindex;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (!(o instanceof KeyValue)) return false;
|
||||
|
||||
KeyValue other = (KeyValue) o;
|
||||
return keyStrindex == other.keyStrindex && value.equals(other.value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = 17;
|
||||
result = 31 * result + keyStrindex;
|
||||
return 31 * result + value.hashCode();
|
||||
}
|
||||
}
|
||||
|
||||
private static final class IntArray {
|
||||
static final IntArray EMPTY = new IntArray(new int[0]);
|
||||
|
||||
final int[] array;
|
||||
final int hash;
|
||||
|
||||
IntArray(int[] array) {
|
||||
this.array = array;
|
||||
this.hash = Arrays.hashCode(array);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
return o instanceof IntArray && Arrays.equals(array, ((IntArray) o).array);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return hash;
|
||||
}
|
||||
}
|
||||
|
||||
private static final class AggregatedEvent {
|
||||
long[] timestamps = new long[1];
|
||||
long[] values = new long[1];
|
||||
int eventsCount = 0;
|
||||
|
||||
public void recordEvent(long timestamp, long value) {
|
||||
if (eventsCount == timestamps.length) {
|
||||
int newSize = timestamps.length * 2;
|
||||
timestamps = Arrays.copyOf(timestamps, newSize);
|
||||
values = Arrays.copyOf(values, newSize);
|
||||
}
|
||||
timestamps[eventsCount] = timestamp;
|
||||
values[eventsCount] = value;
|
||||
++eventsCount;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,6 @@ package one.convert;
|
||||
import one.jfr.JfrReader;
|
||||
import one.jfr.StackTrace;
|
||||
import one.jfr.event.Event;
|
||||
import one.jfr.event.EventAggregator;
|
||||
import one.proto.Proto;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
@@ -28,29 +27,21 @@ public class JfrToPprof extends JfrConverter {
|
||||
public JfrToPprof(JfrReader jfr, Arguments args) {
|
||||
super(jfr, args);
|
||||
|
||||
Proto sampleType;
|
||||
if (args.alloc || args.live) {
|
||||
sampleType = valueType("allocations", args.total ? "bytes" : "count");
|
||||
} else if (args.lock) {
|
||||
sampleType = valueType("locks", args.total ? "nanoseconds" : "count");
|
||||
} else {
|
||||
sampleType = valueType("cpu", args.total ? "nanoseconds" : "count");
|
||||
}
|
||||
|
||||
profile.field(1, sampleType)
|
||||
profile.field(1, valueType(getValueType(), args.total ? getTotalUnits() : getSampleUnits()))
|
||||
.field(13, strings.index("Produced by async-profiler"));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void convertChunk() throws IOException {
|
||||
collectEvents().forEach(new EventAggregator.Visitor() {
|
||||
protected void convertChunk() {
|
||||
collector.forEach(new AggregatedEventVisitor() {
|
||||
final Proto s = new Proto(100);
|
||||
final double ticksToNanos = 1e9 / jfr.ticksPerSec;
|
||||
final boolean scale = args.total && args.lock && ticksToNanos != 1.0;
|
||||
|
||||
@Override
|
||||
public void visit(Event event, long value) {
|
||||
profile.field(2, sample(s, event, scale ? (long) (value * ticksToNanos) : value));
|
||||
if (excludeStack(event.stackTraceId, event.tid, event.classId())) {
|
||||
return;
|
||||
}
|
||||
profile.field(2, sample(s, event, value));
|
||||
s.reset();
|
||||
}
|
||||
});
|
||||
@@ -81,7 +72,7 @@ public class JfrToPprof extends JfrConverter {
|
||||
}
|
||||
|
||||
private Proto sample(Proto s, Event event, long value) {
|
||||
int packedLocations = s.startField(1);
|
||||
long packedLocations = s.startField(1, 3);
|
||||
|
||||
long classId = event.classId();
|
||||
if (classId != 0) {
|
||||
|
||||
@@ -3,10 +3,11 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import one.convert.*;
|
||||
package one.convert;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
public class Main {
|
||||
@@ -18,7 +19,7 @@ public class Main {
|
||||
return;
|
||||
}
|
||||
|
||||
if (args.files.size() == 1) {
|
||||
if (args.files.size() == (args.diff ? 2 : 1)) {
|
||||
args.files.add(".");
|
||||
}
|
||||
|
||||
@@ -35,6 +36,34 @@ public class Main {
|
||||
}
|
||||
}
|
||||
|
||||
if (args.diff) {
|
||||
if (fileCount != 2) {
|
||||
throw new IllegalArgumentException("--diff option requires two input files");
|
||||
}
|
||||
if (!"html".equals(args.output) && !"collapsed".equals(args.output)) {
|
||||
throw new IllegalArgumentException("--diff option requires html or collapsed output format");
|
||||
}
|
||||
|
||||
args.norm = true; // don't let random IDs in class names spoil comparison
|
||||
|
||||
String input1 = args.files.get(0);
|
||||
String input2 = args.files.get(1);
|
||||
String output = isDirectory ? new File(lastFile, replaceExt(input2, "diff." + args.output)).getPath() : lastFile;
|
||||
|
||||
System.out.print("Converting " + getFileName(input2) + " vs " + getFileName(input1) + " -> " + getFileName(output) + " ");
|
||||
System.out.flush();
|
||||
|
||||
long startTime = System.nanoTime();
|
||||
FlameGraph base = parseFlameGraph(input1, args);
|
||||
FlameGraph current = parseFlameGraph(input2, args);
|
||||
current.diff(base);
|
||||
current.dump(new FileOutputStream(output));
|
||||
long endTime = System.nanoTime();
|
||||
|
||||
System.out.print("# " + (endTime - startTime) / 1000000 / 1000.0 + " s\n");
|
||||
return;
|
||||
}
|
||||
|
||||
for (int i = 0; i < fileCount; i++) {
|
||||
String input = args.files.get(i);
|
||||
String output = isDirectory ? new File(lastFile, replaceExt(input, args.output)).getPath() : lastFile;
|
||||
@@ -52,10 +81,14 @@ public class Main {
|
||||
|
||||
public static void convert(String input, String output, Arguments args) throws IOException {
|
||||
if (isJfr(input)) {
|
||||
if ("html".equals(args.output)) {
|
||||
if ("html".equals(args.output) || "collapsed".equals(args.output)) {
|
||||
JfrToFlame.convert(input, output, args);
|
||||
} else if ("pprof".equals(args.output) || "pb".equals(args.output) || args.output.endsWith("gz")) {
|
||||
JfrToPprof.convert(input, output, args);
|
||||
} else if ("heatmap".equals(args.output)) {
|
||||
JfrToHeatmap.convert(input, output, args);
|
||||
} else if ("otlp".equals(args.output)) {
|
||||
JfrToOtlp.convert(input, output, args);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Unrecognized output format: " + args.output);
|
||||
}
|
||||
@@ -64,11 +97,20 @@ public class Main {
|
||||
}
|
||||
}
|
||||
|
||||
public static FlameGraph parseFlameGraph(String input, Arguments args) throws IOException {
|
||||
if (isJfr(input)) {
|
||||
return JfrToFlame.parse(input, args);
|
||||
} else {
|
||||
return FlameGraph.parse(input, args);
|
||||
}
|
||||
}
|
||||
|
||||
private static String getFileName(String fileName) {
|
||||
return fileName.substring(fileName.lastIndexOf(File.separatorChar) + 1);
|
||||
}
|
||||
|
||||
private static String replaceExt(String fileName, String ext) {
|
||||
private static String replaceExt(String fileName, String output) {
|
||||
String ext = "heatmap".equals(output) ? "html" : output;
|
||||
int slash = fileName.lastIndexOf(File.separatorChar);
|
||||
int dot = fileName.lastIndexOf('.');
|
||||
return dot > slash ? fileName.substring(slash + 1, dot + 1) + ext : fileName.substring(slash + 1) + '.' + ext;
|
||||
@@ -90,14 +132,23 @@ public class Main {
|
||||
System.out.print("Usage: jfrconv [options] <input> [<input>...] <output>\n" +
|
||||
"\n" +
|
||||
"Conversion options:\n" +
|
||||
" -o --output FORMAT Output format: html, collapsed, pprof, pb.gz\n" +
|
||||
" -o --output FORMAT Output format: html, collapsed, pprof, pb.gz, heatmap, otlp\n" +
|
||||
" -I --include REGEX Include only stacks with the specified frames\n" +
|
||||
" -X --exclude REGEX Exclude stacks with the specified frames\n" +
|
||||
" --diff Create differential Flame Graph from two input files\n" +
|
||||
"\n" +
|
||||
"JFR options:\n" +
|
||||
" --cpu CPU profile\n" +
|
||||
" --cpu CPU profile (ExecutionSample)\n" +
|
||||
" --cpu-time CPU profile (CPUTimeSample)\n" +
|
||||
" --wall Wall clock profile\n" +
|
||||
" --alloc Allocation profile\n" +
|
||||
" --live Live object profile\n" +
|
||||
" --nativemem malloc profile\n" +
|
||||
" --leak Only include memory leaks in nativemem\n" +
|
||||
" --tail RATIO Ignore tail allocations for leak profiling (10% by default)\n" +
|
||||
" --lock Lock contention profile\n" +
|
||||
" --nativelock Native (pthread) lock contention profile\n" +
|
||||
" --trace Method traces / latency profile\n" +
|
||||
" -t --threads Split stack traces by threads\n" +
|
||||
" -s --state LIST Filter thread states: runnable, sleeping\n" +
|
||||
" --classify Classify samples into predefined categories\n" +
|
||||
@@ -109,14 +160,16 @@ public class Main {
|
||||
" --dot Dotted class names\n" +
|
||||
" --from TIME Start time in ms (absolute or relative)\n" +
|
||||
" --to TIME End time in ms (absolute or relative)\n" +
|
||||
" --latency MS Retain only samples within MethodTraces of at least MS milliseconds\n" +
|
||||
"\n" +
|
||||
"Flame Graph options:\n" +
|
||||
" --title STRING Flame Graph title\n" +
|
||||
" --minwidth X Skip frames smaller than X%\n" +
|
||||
" --grain X Coarsen Flame Graph to the given grain size\n" +
|
||||
" --skip N Skip N bottom frames\n" +
|
||||
" -r --reverse Reverse stack traces (icicle graph)\n" +
|
||||
" -I --include REGEX Include only stacks with the specified frames\n" +
|
||||
" -X --exclude REGEX Exclude stacks with the specified frames\n" +
|
||||
" -r --reverse Reverse stack traces (defaults to icicle graph)\n" +
|
||||
" -i --inverted Toggles the layout for reversed stacktraces from icicle to flamegraph\n" +
|
||||
" and for default stacktraces from flamegraph to icicle\n" +
|
||||
" --highlight REGEX Highlight frames matching the given pattern\n");
|
||||
}
|
||||
}
|
||||
63
src/converter/one/convert/OtlpConstants.java
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
final class OtlpConstants {
|
||||
|
||||
static final String OTLP_THREAD_NAME = "thread.name";
|
||||
|
||||
static final int
|
||||
PROFILES_DICTIONARY_mapping_table = 1,
|
||||
PROFILES_DICTIONARY_location_table = 2,
|
||||
PROFILES_DICTIONARY_function_table = 3,
|
||||
PROFILES_DICTIONARY_string_table = 5,
|
||||
PROFILES_DICTIONARY_attribute_table = 6,
|
||||
PROFILES_DICTIONARY_stack_table = 7;
|
||||
|
||||
static final int
|
||||
PROFILES_DATA_resource_profiles = 1,
|
||||
PROFILES_DATA_dictionary = 2;
|
||||
|
||||
static final int RESOURCE_PROFILES_scope_profiles = 2;
|
||||
|
||||
static final int SCOPE_PROFILES_profiles = 2;
|
||||
|
||||
static final int
|
||||
PROFILE_sample_type = 1,
|
||||
PROFILE_samples = 2,
|
||||
PROFILE_time_unix_nano = 3,
|
||||
PROFILE_duration_nanos = 4;
|
||||
|
||||
static final int
|
||||
VALUE_TYPE_type_strindex = 1,
|
||||
VALUE_TYPE_unit_strindex = 2,
|
||||
VALUE_TYPE_aggregation_temporality = 3;
|
||||
|
||||
static final int
|
||||
SAMPLE_stack_index = 1,
|
||||
SAMPLE_values = 2,
|
||||
SAMPLE_attribute_indices = 3,
|
||||
SAMPLE_timestamps_unix_nano = 5;
|
||||
|
||||
static final int
|
||||
STACK_location_indices = 1;
|
||||
|
||||
static final int
|
||||
LOCATION_mapping_index = 1,
|
||||
LOCATION_line = 3;
|
||||
|
||||
static final int
|
||||
LINE_function_index = 1,
|
||||
LINE_lines = 2;
|
||||
|
||||
static final int FUNCTION_name_strindex = 1;
|
||||
|
||||
static final int
|
||||
KEY_VALUE_AND_UNIT_key_strindex = 1,
|
||||
KEY_VALUE_AND_UNIT_value = 2;
|
||||
|
||||
static final int ANY_VALUE_string_value = 1;
|
||||
}
|
||||
38
src/converter/one/convert/ResourceProcessor.java
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.PrintStream;
|
||||
|
||||
/**
 * Helpers for loading classpath resources (HTML templates) and for streaming
 * a template up to a placeholder marker so generated data can be spliced in.
 */
public class ResourceProcessor {

    /**
     * Reads the classpath resource {@code name} fully and returns it decoded as UTF-8.
     *
     * @throws IllegalStateException if the resource is missing or cannot be read;
     *                               the underlying {@link IOException} is preserved as the cause.
     */
    public static String getResource(String name) {
        try (InputStream stream = ResourceProcessor.class.getResourceAsStream(name)) {
            if (stream == null) {
                throw new IOException("No resource found");
            }

            ByteArrayOutputStream result = new ByteArrayOutputStream();
            byte[] buffer = new byte[32768];
            for (int length; (length = stream.read(buffer)) != -1; ) {
                result.write(buffer, 0, length);
            }
            return result.toString("UTF-8");
        } catch (IOException e) {
            // Keep the original exception as the cause instead of discarding it,
            // so the real I/O failure is visible in the stack trace.
            throw new IllegalStateException("Can't load resource with name " + name, e);
        }
    }

    /**
     * Prints the part of {@code data} preceding the first occurrence of {@code till}
     * to {@code out} and returns the remainder after the marker.
     *
     * @throws IllegalStateException if {@code till} does not occur in {@code data}
     *                               (previously surfaced as an obscure StringIndexOutOfBoundsException).
     */
    public static String printTill(PrintStream out, String data, String till) {
        int index = data.indexOf(till);
        if (index < 0) {
            throw new IllegalStateException("Marker not found: " + till);
        }
        out.print(data.substring(0, index));
        return data.substring(index + till.length());
    }

}
|
||||
79
src/converter/one/convert/TimeIntervals.java
Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.convert;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.NavigableMap;
|
||||
import java.util.TreeMap;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * An immutable set of disjoint {@code [start, end]} time intervals with
 * O(log n) point-containment queries. Instances are created through
 * {@link Builder}, which merges overlapping (or touching) intervals on insert.
 */
public final class TimeIntervals {
    // Parallel arrays, sorted ascending by start; interval i is
    // startIntervals[i]..endIntervals[i], both bounds inclusive.
    private final long[] startIntervals;
    private final long[] endIntervals;

    private TimeIntervals(long[] startIntervals, long[] endIntervals) {
        this.startIntervals = startIntervals;
        this.endIntervals = endIntervals;
    }

    /** Returns true when {@code instant} lies inside any stored interval (bounds inclusive). */
    public boolean contains(long instant) {
        int found = Arrays.binarySearch(startIntervals, instant);
        if (found >= 0) {
            return true; // instant is exactly an interval start
        }
        // Index of the last interval starting at or before 'instant' (-1 if none).
        int candidate = -(found + 1) - 1;
        return candidate >= 0 && instant <= endIntervals[candidate];
    }

    /** Accumulates intervals, merging overlaps, and produces a {@link TimeIntervals}. */
    public static final class Builder {
        // Invariant: entries are pairwise disjoint; key = start, value = end.
        private final TreeMap<Long, Long> timeIntervals = new TreeMap<>();

        public void add(long startInstant, long endInstant) {
            if (startInstant > endInstant) {
                throw new IllegalArgumentException("'startInstant' should not be after 'endInstant'");
            }

            // Absorb every existing interval whose start lies inside the new one;
            // the rightmost of them may extend past endInstant.
            NavigableMap<Long, Long> overlapping =
                    timeIntervals.subMap(startInstant, true, endInstant, true);
            Map.Entry<Long, Long> rightmost = overlapping.pollLastEntry();
            if (rightmost != null) {
                endInstant = Long.max(endInstant, rightmost.getValue());
            }
            overlapping.clear();

            // An interval starting before 'startInstant' may still reach into the new one.
            Map.Entry<Long, Long> previous = timeIntervals.floorEntry(startInstant);
            if (previous != null && previous.getValue() >= startInstant) {
                timeIntervals.remove(previous.getKey());
                endInstant = Long.max(endInstant, previous.getValue());
                startInstant = previous.getKey();
            }

            timeIntervals.put(startInstant, endInstant);
        }

        public TimeIntervals build() {
            int count = timeIntervals.size();
            long[] starts = new long[count];
            long[] ends = new long[count];
            int i = 0;
            for (Map.Entry<Long, Long> interval : timeIntervals.entrySet()) {
                starts[i] = interval.getKey();
                ends[i] = interval.getValue();
                i++;
            }
            return new TimeIntervals(starts, ends);
        }
    }
}
|
||||
586
src/converter/one/heatmap/Heatmap.java
Normal file
@@ -0,0 +1,586 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.util.*;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import one.convert.*;
|
||||
import one.jfr.DictionaryInt;
|
||||
|
||||
/**
 * Aggregates profiling samples into time blocks and renders them into a
 * self-contained HTML heatmap page (based on the "/heatmap.html" template).
 *
 * Usage protocol (driven by the surrounding converter): addStack()/addEvent()
 * during chunk parsing, beforeChunk() between chunks, finish() once parsing
 * completes, then dump() to write the page.
 */
public class Heatmap {

    // TODO: should be probably an argument,
    // but there is a good chance that changing it will have some side effects
    public static final int BLOCK_DURATION_MS = 20;

    private final Arguments args;
    // Mutable accumulation state; deliberately dropped (set to null) in evaluate()
    private State state;
    private long startMs;

    public Heatmap(Arguments args, JfrConverter converter) {
        this.args = args;
        this.state = new State(converter, args, BLOCK_DURATION_MS);
    }

    // Records one sample event; delegates to the accumulation state.
    public void addEvent(int stackTraceId, int threadId, int classId, byte type, long timeMs) {
        state.addEvent(stackTraceId, threadId, classId, type, timeMs);
    }

    // Registers a stack-trace definition under the given id.
    public void addStack(long id, long[] methods, int[] locations, byte[] types, int size) {
        state.addStack(id, methods, locations, types, size);
    }

    // Called between chunks: per-chunk caches must not leak across chunk boundaries.
    public void beforeChunk() {
        state.methodCache.clear();
        state.includeCache.clear();
    }

    // Called once parsing is done; remembers the recording start and drops caches.
    public void finish(long startMs) {
        this.startMs = startMs;
        state.methodCache.clear();
        state.stackTracesCache.clear();
        state.includeCache.clear();
    }

    // Freezes accumulated data into an immutable context for output generation.
    // Nulls out 'state' so its large caches become garbage-collectable.
    private EvaluationContext evaluate() {
        State state = this.state;
        this.state = null;
        return new EvaluationContext(
                state.sampleList.samples(),
                state.methods,
                state.stackTracesRemap.orderedTraces(),
                state.symbolTable.keys()
        );
    }

    // Writes the method table: per method two var-ints (class/method symbol indices),
    // the 32-bit location split into two 16-bit halves (each in an 18-bit field),
    // and the 6-bit frame type.
    private void compressMethods(HtmlOut out, Method[] methods) {
        out.writeVar(methods.length);
        for (Method method : methods) {
            out.writeVar(method.className);
            out.writeVar(method.methodName);
            out.write18(method.location & 0xffff);
            out.write18(method.location >>> 16);
            out.write6(method.type);
        }
    }

    // Renders the complete HTML page by splicing generated sections into the
    // template at its /*marker*/ placeholders. Section payloads are wrapped in
    // 'S'...'E' sentinels for the in-page JS decoder.
    public void dump(PrintStream stream) throws IOException {
        if (state.sampleList.getRecordsCount() == 0) {
            // Need a better way to handle this, but we should not throw an exception
            stream.println("No samples found");
            return;
        }

        EvaluationContext evaluationContext = evaluate();

        String tail = ResourceProcessor.getResource("/heatmap.html");

        tail = ResourceProcessor.printTill(stream, tail, "/*executionsHeatmap:*/");
        HtmlOut out = new HtmlOut(stream);
        stream.print('S');
        printHeatmap(out, evaluationContext);
        stream.print('E');

        tail = ResourceProcessor.printTill(stream, tail, "/*methods:*/");
        out.reset();
        stream.print('S');
        printMethods(out, evaluationContext);
        stream.print('E');

        tail = ResourceProcessor.printTill(stream, tail, "/*title:*/");
        stream.print(args.title != null ? args.title : "Heatmap");

        tail = ResourceProcessor.printTill(stream, tail, "/*startMs:*/0");
        stream.print(startMs);

        tail = ResourceProcessor.printTill(stream, tail, "/*cpool:*/");
        printConstantPool(stream, evaluationContext);

        stream.print(tail);
    }

    // Emits the heatmap payload. NOTE: the emission order below is a wire format
    // shared with the JS decoder in heatmap.html — do not reorder the steps.
    private void printHeatmap(final HtmlOut out, EvaluationContext context) {
        int veryStart = out.pos();
        int wasPos = out.pos();

        // calculates methods frequency during building the tree
        int[] stackChunksBuffer = buildLz78TreeAndPrepareData(context);

        // gives methods new ids, more frequent (in tree's data) methods will have lower id
        renameMethodsByFrequency(context);

        // writes "starts" - ids of methods that indicates a start of a next stack trace
        writeStartMethods(out, context);
        wasPos = debugStep("start methods", out, wasPos, veryStart);

        // writes block sizes, compressed by huffman algorithm
        writeBlockSizes(out, context);
        wasPos = debugStep("stack sizes", out, wasPos, veryStart);

        // NOTE: destroys internal state!
        SynonymTable synonymTable = context.nodeTree.extractSynonymTable();
        synonymTable.calculateSynonyms();
        // writes frequent lz tree nodes as a synonyms table
        writeSynonymsTable(out, synonymTable);
        wasPos = debugStep("tree synonyms", out, wasPos, veryStart);

        // writes lz tree with two pairs of var-ints: [parent node id] + [method id of this node]
        writeTree(out, synonymTable, context);
        wasPos = debugStep("tree body", out, wasPos, veryStart);

        // calculate counts for the next synonyms table, that will be used for samples
        int chunksCount = calculateSamplesSynonyms(synonymTable, context, stackChunksBuffer);
        // writes frequent lz tree nodes as a synonyms table (for sample chunks)
        writeSynonymsTable(out, synonymTable);
        wasPos = debugStep("samples synonyms", out, wasPos, veryStart);

        // writes sample chunks as var-ints references for [node id]
        writeSamples(out, synonymTable, context, stackChunksBuffer);
        debugStep("samples body", out, wasPos, veryStart);
        debug("storage size: " + context.nodeTree.storageSize());

        // Trailer: counts the decoder needs to pre-size its buffers.
        out.write30(context.nodeTree.nodesCount());
        out.write30(context.sampleList.blockSizes.length);
        out.write30(context.nodeTree.storageSize());
        out.write30(chunksCount);
        out.write30(context.sampleList.stackIds.length);
    }

    // Emits every sample's chunk list; each chunk is a (possibly synonym-shortened)
    // LZ-tree node id. stackChunksBuffer[stackId*2] / [stackId*2+1] delimit the chunks.
    private void writeSamples(HtmlOut out, SynonymTable synonymTable, EvaluationContext context,
                              int[] stackChunksBuffer) {
        for (int stackId : context.sampleList.stackIds) {
            int chunksStart = stackChunksBuffer[stackId * 2];
            int chunksEnd = stackChunksBuffer[stackId * 2 + 1];

            for (int i = chunksStart; i < chunksEnd; i++) {
                int nodeId = stackChunksBuffer[i];
                out.writeVar(synonymTable.nodeIdOrSynonym(nodeId));
            }
        }
    }

    // Re-counts node usage across all samples so the synonym table can be rebuilt
    // for the samples section. Returns the total number of chunks to be written.
    private int calculateSamplesSynonyms(SynonymTable synonymTable, EvaluationContext context,
                                         int[] stackChunksBuffer) {
        int chunksCount = 0;
        int[] childrenCount = synonymTable.reset();

        for (int stackId : context.sampleList.stackIds) {
            int chunksStart = stackChunksBuffer[stackId * 2];
            int chunksEnd = stackChunksBuffer[stackId * 2 + 1];

            for (int i = chunksStart; i < chunksEnd; i++) {
                childrenCount[stackChunksBuffer[i]]--; // negation for reverse sort
                chunksCount++;
            }
        }

        synonymTable.calculateSynonyms();
        return chunksCount;
    }

    // Writes the LZ tree: per node a var-int parent reference (synonym-shortened)
    // and a var-int frequency-based method id.
    private void writeTree(HtmlOut out, SynonymTable synonymTable, EvaluationContext context) {
        long[] data = context.nodeTree.treeData();
        int dataSize = context.nodeTree.treeDataSize();
        for (int i = 0; i < dataSize; i++) {
            long d = data[i];
            int parentId = context.nodeTree.extractParentId(d);
            int methodId = context.nodeTree.extractMethodId(d);

            out.writeVar(synonymTable.nodeIdOrSynonym(parentId));
            out.writeVar(context.orderedMethods[methodId].frequencyBasedId);
        }
    }

    // Writes the synonym table as a count followed by the synonym values.
    private void writeSynonymsTable(HtmlOut out, SynonymTable synonymTable) {
        out.writeVar(synonymTable.synonymsCount());
        for (int i = 0; i < synonymTable.synonymsCount(); i++) {
            out.writeVar(synonymTable.synonymAt(i));
        }
    }

    // Writes the set of methods flagged as stack-trace starts: count, then the
    // frequency-based id of each such method.
    private void writeStartMethods(HtmlOut out, EvaluationContext context) {
        int startsCount = 0;
        for (Method method : context.orderedMethods) {
            if (method.start) {
                startsCount++;
            }
        }
        out.writeVar(startsCount);
        for (Method method : context.orderedMethods) {
            if (method.start) {
                out.writeVar(method.frequencyBasedId);
            }
        }
    }

    // Assigns frequencyBasedId so that more frequent methods get smaller ids
    // (smaller ids -> shorter var-int encodings in the output).
    private void renameMethodsByFrequency(EvaluationContext context) {
        Method[] methodsByFrequency = context.orderedMethods.clone();
        Arrays.sort(methodsByFrequency, new Comparator<Method>() {
            @Override
            public int compare(Method o1, Method o2) {
                return Integer.compare(o2.frequency, o1.frequency);
            }
        });

        for (int i = 0; i < methodsByFrequency.length; i++) {
            Method method = methodsByFrequency[i];
            method.frequencyBasedId = i + 1; // zero is reserved for no method
        }
    }

    // Builds the LZ78 tree over all sampled stacks and, for the final version of
    // each stack, records its chunk (node id) sequence into the returned buffer.
    // Buffer layout: slots [0 .. stackTraces.length*2] hold per-stack start/end
    // offsets; chunk ids are appended after that region.
    private int[] buildLz78TreeAndPrepareData(EvaluationContext context) {
        int[] samples = context.sampleList.stackIds;

        // prepared data for output, firstly used to remember last stack positions
        int[] stackBuffer = new int[(context.stackTraces.length + 1) * 16];

        // remember the last position of stackId
        for (int i = 0; i < samples.length; i++) {
            int stackId = samples[i];
            stackBuffer[stackId * 2] = ~i; // rewrites data multiple times, the last one wins
        }

        int chunksIterator = context.stackTraces.length * 2 + 1;

        // builds the tree and prepares data for the last stack
        for (int i = 0; i < samples.length; i++) {
            int stackId = samples[i];
            int current = 0;
            int[] stack = context.stackTraces[stackId];

            if (i == ~stackBuffer[stackId * 2]) { // last version of that stack
                stackBuffer[stackId * 2] = chunksIterator; // start

                for (int methodId : stack) {
                    current = context.nodeTree.appendChild(current, methodId);
                    if (current == 0) { // so we are starting from root again, it will be written to output as Lz78 element - [parent node id; method id]
                        context.orderedMethods[methodId].frequency++;
                        if (stackBuffer.length == chunksIterator) {
                            stackBuffer = Arrays.copyOf(stackBuffer, chunksIterator + chunksIterator / 2);
                        }

                        int justAppendedId = context.nodeTree.nodesCount() - 1;
                        stackBuffer[chunksIterator++] = justAppendedId;
                        context.nodeTree.markNodeAsLastlyUsed(justAppendedId);
                    }
                }

                // Trailing partial match: reference the deepest matched node.
                if (current != 0) {
                    if (stackBuffer.length == chunksIterator) {
                        stackBuffer = Arrays.copyOf(stackBuffer, chunksIterator + chunksIterator / 2);
                    }

                    stackBuffer[chunksIterator++] = current;
                    context.nodeTree.markNodeAsLastlyUsed(current);
                }

                stackBuffer[stackId * 2 + 1] = chunksIterator; // end
            } else { // general case
                for (int methodId : stack) {
                    current = context.nodeTree.appendChild(current, methodId);
                    if (current == 0) { // so we are starting from root again, it will be written to output as Lz78 element - [parent node id; method id]
                        context.orderedMethods[methodId].frequency++;
                    }
                }
            }
        }

        // removes unused chunks
        context.nodeTree.compactTree(stackBuffer, context.stackTraces.length * 2 + 1, chunksIterator);

        return stackBuffer;
    }

    // Huffman-compresses the per-block sample counts: first the canonical decode
    // table (lengths + values), then the packed bit stream.
    private void writeBlockSizes(HtmlOut out, EvaluationContext context) {
        int[] blockSizeFrequencies = new int[1024];
        int maxBlockSize = 0;
        for (int blockSize : context.sampleList.blockSizes) {
            if (blockSize >= blockSizeFrequencies.length) {
                blockSizeFrequencies = Arrays.copyOf(blockSizeFrequencies, blockSize * 2);
            }
            blockSizeFrequencies[blockSize]++;
            maxBlockSize = Math.max(maxBlockSize, blockSize);
        }

        HuffmanEncoder encoder = new HuffmanEncoder(blockSizeFrequencies, maxBlockSize);

        long[] decodeTable = encoder.calculateOutputTable();

        out.writeVar(decodeTable.length);
        // Top 8 bits of each table entry hold the code length; entries are sorted,
        // so the last entry carries the maximum code length.
        int maxBits = (int) (decodeTable[decodeTable.length - 1] >>> 56);
        out.writeVar(maxBits);

        for (long l : decodeTable) {
            out.writeVar(l & 0x00FF_FFFF_FFFF_FFFFL); // symbol value (low 56 bits)
            out.writeVar(l >>> 56);                   // code length
        }

        for (int blockSize : context.sampleList.blockSizes) {
            if (encoder.append(blockSize)) {
                for (int value : encoder.values) {
                    out.nextByte(value);
                }
            }
        }
        if (encoder.flushIfNeed()) {
            for (int value : encoder.values) {
                out.nextByte(value);
            }
        }
    }

    // Writes the symbol table as a JS array literal body: "sym1","sym2",...
    private void printConstantPool(PrintStream out, EvaluationContext evaluationContext) {
        for (String symbol : evaluationContext.symbols) {
            out.print('"');
            out.print(symbol.replace("\\", "\\\\").replace("\"", "\\\""));
            out.print("\",");
        }
    }

    // Writes the method table ordered by frequencyBasedId, framed by 'A' sentinels.
    private void printMethods(HtmlOut out, EvaluationContext evaluationContext) throws IOException {
        debug("methods count " + evaluationContext.orderedMethods.length);
        Arrays.sort(evaluationContext.orderedMethods, new Comparator<Method>() {
            @Override
            public int compare(Method o1, Method o2) {
                return Integer.compare(o1.frequencyBasedId, o2.frequencyBasedId);
            }
        });
        out.nextByte('A');
        compressMethods(out, evaluationContext.orderedMethods);
        out.nextByte('A');
    }

    // Logs the size of the section written since 'wasPos'; returns the new position.
    private int debugStep(String step, HtmlOut out, int wasPos, int veryStartPos) {
        int nowPos = out.pos();
        debug(step + " " + (nowPos - wasPos) / (1024.0 * 1024.0) + " MB");
        debug(step + " pos in data " + (nowPos - veryStartPos));
        return nowPos;
    }

    private void debug(String text) {
        // Basically, no user will ever need that, but it will be helpful to debug broken data
        // System.out.println(text);
    }

    // Immutable snapshot of the accumulated data, used during output generation.
    private static class EvaluationContext {
        final Method[] orderedMethods;
        final int[][] stackTraces;
        final String[] symbols;

        final SampleList.Result sampleList;

        final LzNodeTree nodeTree = new LzNodeTree();

        EvaluationContext(SampleList.Result sampleList, Index<Method> methods, int[][] stackTraces, String[] symbols) {
            this.sampleList = sampleList;
            this.stackTraces = stackTraces;
            this.symbols = symbols;
            orderedMethods = methods.keys();
        }
    }

    // Accumulates samples, stacks, methods and symbols while parsing the recording.
    private static class State {

        // Hard cap on the number of recorded samples.
        private static final int LIMIT = Integer.MAX_VALUE;

        final JfrConverter converter;
        final Arguments args;
        final SampleList sampleList;
        final StackStorage stackTracesRemap = new StackStorage();

        // Maps stack trace ID to prototype ID in stackTracesRemap
        final DictionaryInt stackTracesCache = new DictionaryInt();
        final Map<MethodKey, Integer> methodCache = new HashMap<>();
        final BidirectionalIndex<Method> methods = new BidirectionalIndex<>(Method.class, Method.EMPTY);
        final BidirectionalIndex<String> symbolTable = new BidirectionalIndex<>(String.class, "");

        // Cache for exclude/include filter results per prototype ID
        final Map<Integer, Boolean> includeCache = new HashMap<>();

        // reusable array to (temporary) store (potentially) new stack trace
        int[] cachedStackTrace = new int[4096];

        State(JfrConverter converter, Arguments args, long blockDurationMs) {
            this.converter = converter;
            this.args = args;
            this.sampleList = new SampleList(blockDurationMs);
        }

        // Builds the display name of a frame; a zero symbol index means "absent"
        // (0 is the index of the empty-string sentinel in symbolTable).
        private String resolveFrameName(Method method) {
            if (method.className == 0) {
                return symbolTable.getKey(method.methodName);
            }
            if (method.methodName == 0) {
                return symbolTable.getKey(method.className);
            }
            return symbolTable.getKey(method.className) + '.' + symbolTable.getKey(method.methodName);
        }

        // Fast path when no filters are configured; otherwise memoized per prototype.
        private boolean includeStack(int prototypeId) {
            if (args.include == null && args.exclude == null) {
                return true;
            }
            return includeCache.computeIfAbsent(prototypeId, stackId -> applyIncludeExcludeFilter(stackId));
        }

        // Returns true if the stack should be included
        private boolean applyIncludeExcludeFilter(int stackId) {
            int[] stack = stackTracesRemap.get(stackId);
            Pattern include = args.include;
            Pattern exclude = args.exclude;
            for (int i = 0; i < stack.length; i++) {
                Method method = methods.getKey(stack[i]);
                String name = resolveFrameName(method);
                if (exclude != null && exclude.matcher(name).matches()) {
                    return false;
                }
                if (include != null && include.matcher(name).matches()) {
                    // Include matched: if there is no exclude filter we are done;
                    // otherwise keep scanning for a possible exclude match.
                    if (exclude == null) return true;
                    include = null;
                }
            }
            return include == null;
        }

        // Records one sample. When thread or class decoration is requested, the
        // prototype stack is extended with synthetic frames and re-interned.
        public void addEvent(int stackTraceId, int threadId, int classId, byte type, long timeMs) {
            if (sampleList.getRecordsCount() >= LIMIT || stackTraceId == 0) {
                return;
            }

            int prototypeId = stackTracesCache.get(stackTraceId);
            if (classId == 0 && !args.threads) {
                // No decoration needed: reference the prototype stack directly.
                if (includeStack(prototypeId)) {
                    sampleList.add(prototypeId, timeMs);
                }
                return;
            }

            int[] prototype = stackTracesRemap.get(prototypeId);
            int stackSize = prototype.length + (args.threads ? 1 : 0) + (classId != 0 ? 1 : 0);
            if (cachedStackTrace.length < stackSize) {
                cachedStackTrace = new int[stackSize * 2];
            }

            if (args.threads) {
                // Synthetic thread frame at the top of the stack.
                MethodKey key = new MethodKey(MethodKeyType.THREAD, threadId, -1, Frame.TYPE_NATIVE, true);
                cachedStackTrace[0] = getMethodIndex(key);
            }

            System.arraycopy(prototype, 0, cachedStackTrace, args.threads ? 1 : 0, prototype.length);

            if (classId != 0) {
                // Synthetic class frame (e.g. allocated class) at the bottom.
                MethodKey key = new MethodKey(MethodKeyType.CLASS, classId, -1, type, false);
                cachedStackTrace[stackSize - 1] = getMethodIndex(key);
            }

            int newStackId = stackTracesRemap.index(cachedStackTrace, stackSize);
            if (includeStack(newStackId)) {
                sampleList.add(newStackId, timeMs);
            }
        }

        // Interns a stack trace: frames arrive top-first and are reversed so that
        // index 0 is the root frame; the interned id is cached under 'id'.
        public void addStack(long id, long[] methods, int[] locations, byte[] types, int size) {
            if (cachedStackTrace.length < size) {
                cachedStackTrace = new int[size * 2];
            }

            for (int i = size - 1; i >= 0; i--) {
                long methodId = methods[i];
                byte type = types[i];
                int location = locations[i];

                int index = size - 1 - i;
                boolean firstMethodInTrace = index == 0;

                // When args.threads is true, the first frame is the artificial thread frame
                boolean firstFrameInStack = firstMethodInTrace && !args.threads;

                MethodKey key = new MethodKey(MethodKeyType.METHOD, methodId, location, type, firstFrameInStack);
                cachedStackTrace[index] = getMethodIndex(key);
            }

            stackTracesCache.put(id, stackTracesRemap.index(cachedStackTrace, size));
        }

        // Memoized method interning: resolves the key to a Method once per chunk.
        private int getMethodIndex(MethodKey key) {
            Integer oldIdx = methodCache.get(key);
            if (oldIdx != null) return oldIdx;

            int newIdx = methods.index(key.makeMethod(converter, symbolTable));
            methodCache.put(key, newIdx);
            return newIdx;
        }

        // Identity of a frame before symbol resolution; used as a cache key.
        private static final class MethodKey {
            private final long methodId;
            // 32 bits: location
            // 8 bits: type
            // 1 bit: firstInStack
            private final long metadata;
            // Used to infer what type of method to create
            private final MethodKeyType keyType;

            public MethodKey(MethodKeyType keyType, long methodId, int location, byte type, boolean firstInStack) {
                this.keyType = keyType;
                this.methodId = methodId;
                this.metadata = (long) (firstInStack ? 1 : 0) << 40 | (type & 0xffL) << 32 | (location & 0xFFFFFFFFL);
            }

            public int getLocation() {
                return (int) metadata;
            }

            public byte getType() {
                return (byte) (metadata >> 32);
            }

            public boolean getFirstInStack() {
                return ((metadata >> 40) & 1L) != 0;
            }

            // Resolves this key into a Method, interning its symbols on the way.
            public Method makeMethod(JfrConverter converter, Index<String> symbolTable) {
                switch (keyType) {
                    case METHOD:
                        StackTraceElement ste = converter.getStackTraceElement(methodId, getType(), getLocation());
                        int className = symbolTable.index(ste.getClassName());
                        int methodName = symbolTable.index(ste.getMethodName());
                        return new Method(className, methodName, getLocation(), getType(), getFirstInStack());

                    case THREAD:
                        // methodId carries the thread id for THREAD keys
                        String threadName = converter.getThreadName(Math.toIntExact(methodId));
                        return new Method(0, symbolTable.index(threadName), getLocation(), getType(), getFirstInStack());

                    case CLASS:
                        // methodId carries the class id for CLASS keys
                        String javaClassName = converter.getClassName(methodId);
                        return new Method(symbolTable.index(javaClassName), 0, getLocation(), getType(), getFirstInStack());

                    default:
                        throw new IllegalArgumentException("Unexpected keyType: " + keyType);
                }
            }

            @Override
            public boolean equals(Object other) {
                if (!(other instanceof MethodKey)) return false;
                MethodKey methodKey = (MethodKey) other;
                return methodId == methodKey.methodId && metadata == methodKey.metadata && keyType == methodKey.keyType;
            }

            @Override
            public int hashCode() {
                return 31 * (31 * Long.hashCode(methodId) + Long.hashCode(metadata)) + keyType.hashCode();
            }
        }

        private enum MethodKeyType {
            METHOD, THREAD, CLASS
        }
    }

}
|
||||
85
src/converter/one/heatmap/HtmlOut.java
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.io.PrintStream;
|
||||
|
||||
/**
 * Byte-oriented writer used to embed binary payloads inside an HTML page.
 * Bytes that would break the surrounding HTML/JS context (NUL, CR, '&amp;',
 * '&lt;', '&gt;') are remapped onto otherwise-unused values 123..127; the
 * in-page decoder reverses the mapping. Also tracks the number of bytes
 * written since the last {@link #reset()}.
 */
public class HtmlOut {

    private final PrintStream out;

    // Bytes emitted since construction or the last reset().
    private int pos;

    public HtmlOut(PrintStream out) {
        this.out = out;
    }

    /** Number of bytes written since the last {@link #reset()}. */
    public int pos() {
        return pos;
    }

    /** Restarts the byte counter (e.g. at the beginning of a new section). */
    public void reset() {
        pos = 0;
    }

    /** Writes one raw byte, remapping HTML-unsafe values onto 123..127. */
    public void nextByte(int c) {
        if (c == 0) {
            c = 127;
        } else if (c == '\r') {
            c = 126;
        } else if (c == '&') {
            c = 125;
        } else if (c == '<') {
            c = 124;
        } else if (c == '>') {
            c = 123;
        }
        out.write(c);
        pos++;
    }

    /**
     * Writes a non-negative value as a base-61 var-int, least significant digit
     * first; digits of non-final bytes are offset by 61 as a continuation mark.
     */
    public void writeVar(long v) {
        for (; v >= 61; v /= 61) {
            nextByte((int) (v % 61) + 61);
        }
        nextByte((int) v);
    }

    /** Writes a value that must fit in 6 bits as a single byte. */
    public void write6(int v) {
        if (v != (v & 0x3F)) {
            throw new IllegalArgumentException("Value " + v + " is out of bounds");
        }
        nextByte(v);
    }

    /** Writes an 18-bit value as three 6-bit bytes, least significant first. */
    public void write18(int v) {
        if ((v >>> 18) != 0) {
            throw new IllegalArgumentException("Value " + v + " is out of bounds");
        }
        nextByte(v & 0x3F);
        nextByte((v >>> 6) & 0x3F);
        nextByte((v >>> 12) & 0x3F);
    }

    /** Writes a 30-bit value as five 6-bit bytes, least significant first. */
    public void write30(int v) {
        if ((v >>> 30) != 0) {
            throw new IllegalArgumentException("Value " + v + " is out of bounds");
        }
        for (int shift = 0; shift < 30; shift += 6) {
            nextByte((v >>> shift) & 0x3F);
        }
    }
}
|
||||
152
src/converter/one/heatmap/HuffmanEncoder.java
Normal file
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.PriorityQueue;
|
||||
|
||||
/**
 * Huffman-encodes a stream of small non-negative integers into 27-bit words,
 * each emitted as four base-123 digits (values 0..122) suitable for HtmlOut.
 * The code assignment is canonical-style: table entries are sorted by
 * (code length, value) and consecutive codes are derived arithmetically,
 * so the decoder only needs (length, value) pairs.
 */
public class HuffmanEncoder {

    private final long[] decodeTable; // 8 bit for bits count, 56 value
    private final long[] encodeTable; // 8 bit for bits count, 56 bits

    // Bit accumulator: 'bits' bits collected so far in the low bits of 'data'.
    private int data;
    private int bits;

    // log2(123^9) = 62.4826305481 > 62 bits, 0.7% space lost, but it is expensive to decode (no support for int64 in js)
    // log2(123^4) = 27.7700580214 > 27 bits, 2.8% space lost, but it is cheap to decode (using one int32)
    private static final int MAX_BITS = 27;
    // Output buffer for one flushed 27-bit word: four base-123 digits, 0..122.
    public final int[] values = new int[4]; // 0..122

    /**
     * Builds the Huffman tree from symbol frequencies and derives both the
     * decode table (sorted (length, value) entries) and the encode table
     * (value -> (length, code bits)).
     *
     * @param frequencies       frequency per symbol value; zero entries are skipped
     * @param maxFrequencyIndex largest symbol value present in 'frequencies'
     */
    public HuffmanEncoder(int[] frequencies, int maxFrequencyIndex) {
        PriorityQueue<Node> minHeap = new PriorityQueue<>(maxFrequencyIndex + 1);
        for (int i = 0; i <= maxFrequencyIndex; i++) {
            int frequency = frequencies[i];
            if (frequency == 0) {
                continue;
            }
            minHeap.add(new Node(frequency, i));
        }

        // Classic Huffman construction: repeatedly merge the two rarest nodes.
        while (minHeap.size() > 1) {
            Node left = minHeap.remove();
            Node right = minHeap.remove();

            minHeap.add(new Node(left, right));
        }

        // Each table entry packs the code length into the top 8 bits and the
        // symbol value into the low bits; sorting orders by (length, value).
        long[] decodeTable = new long[maxFrequencyIndex + 1];
        minHeap.remove().fillTable(decodeTable, 0);
        Arrays.sort(decodeTable);
        // Trim the leading zero entries left by absent symbols.
        // NOTE(review): a symbol 0 encoded with length 0 (single-symbol input)
        // would also be trimmed here — presumably callers always supply
        // more than one distinct symbol; verify at call sites.
        for (int i = 0; i < decodeTable.length; i++) {
            if (decodeTable[i] != 0) {
                if (i != 0) {
                    long[] nextDecodeTable = new long[decodeTable.length - i];
                    System.arraycopy(decodeTable, i, nextDecodeTable, 0, nextDecodeTable.length);
                    decodeTable = nextDecodeTable;
                }
                break;
            }
        }
        this.decodeTable = decodeTable;

        // Derive canonical code bits: the first (shortest) code is all zeros;
        // each following code is previous+1, left-shifted when the length grows.
        encodeTable = new long[maxFrequencyIndex + 1];
        encodeTable[(int) decodeTable[0]] = decodeTable[0] & 0xFF00_0000_0000_0000L;
        long code = 0;

        for (int i = 1; i < decodeTable.length; i++) {
            long decodePrev = decodeTable[i - 1];
            long decodeNow = decodeTable[i];

            long prevCount = decodePrev >>> 56;
            long nowCount = decodeNow >>> 56;

            code = (code + 1) << (nowCount - prevCount);

            int value = (int) decodeNow;
            encodeTable[value] = nowCount << 56 | code;
        }
    }

    /**
     * Appends one symbol to the bit stream, MSB first. Returns true when the
     * accumulator filled up and was flushed — the caller must then copy
     * {@link #values} to the output before the next append.
     */
    public boolean append(int value) {
        boolean hasOverflow = false;

        long v = encodeTable[value];
        int bits = (int) (v >>> 56);
        for (long i = 1L << (bits - 1); i > 0; i >>>= 1) {
            this.data = this.data << 1 | ((v & i) == 0 ? 0 : 1);
            if (++this.bits == MAX_BITS) {
                hasOverflow = true;
                flush();
            }
        }

        return hasOverflow;
    }

    /**
     * Flushes a final partial word (zero-padded on the right). Returns true if
     * anything was flushed, i.e. {@link #values} holds data to be written.
     */
    public boolean flushIfNeed() {
        if (bits == 0) {
            return false;
        }
        this.data = this.data << (MAX_BITS - bits);
        flush();
        return true;
    }

    /**
     * Converts the 27 accumulated bits into four base-123 digits in
     * {@link #values} and clears the accumulator. The bit order is reversed
     * (32-bit reverse then >>> 5 keeps exactly MAX_BITS = 27 bits) to match
     * the decoding order expected by the JS side.
     */
    public void flush() {
        data = Integer.reverse(data) >>> 5;

        values[3] = data % 123;
        data /= 123;
        values[2] = data % 123;
        data /= 123;
        values[1] = data % 123;
        data /= 123;
        values[0] = data;
        data = 0;

        bits = 0;
    }

    /** Returns the decode table to be written ahead of the bit stream. */
    public long[] calculateOutputTable() {
        return decodeTable;
    }

    // Huffman tree node: a leaf carries a symbol value >= 0; an internal node
    // (value == -1) aggregates the frequencies of its children.
    private static class Node implements Comparable<Node> {
        final int frequency;
        final int value;

        Node left, right;

        Node(int frequency, int value) {
            this.frequency = frequency;
            this.value = value;
        }

        public Node(Node left, Node right) {
            this.left = left;
            this.right = right;
            this.frequency = left.frequency + right.frequency;
            this.value = -1;
        }

        // Records each leaf's code length (tree depth, in the top 8 bits) and value.
        public void fillTable(long[] table, long bitsCount) {
            if (value >= 0) {
                table[value] = bitsCount | value;
                return;
            }
            left.fillTable(table, bitsCount + 0x0100_0000_0000_0000L);
            right.fillTable(table, bitsCount + 0x0100_0000_0000_0000L);
        }

        @Override
        public int compareTo(Node o) {
            // frequencies are strictly positive
            return frequency - o.frequency;
        }
    }

}
|
||||
173
src/converter/one/heatmap/LzNodeTree.java
Normal file
@@ -0,0 +1,173 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * Builds an LZ78-style trie of stack frames: each node is identified by the
 * pair (parentNodeId, methodId), stored in an open-addressing hash table with
 * linear probing. Node 0 is the implicit root. Several internal arrays are
 * deliberately handed off for reuse by other components (see field comments).
 */
public class LzNodeTree {

    // Initial hash-table capacity; must stay a power of two because lookups
    // use (capacity - 1) as a bit mask.
    private static final int INITIAL_CAPACITY = 2 * 1024 * 1024;

    // hash(methodId << 32 | parentNodeId) -> methodId << 32 | parentNodeId
    private long[] keys; // reused by SynonymTable
    // hash(methodId << 32 | parentNodeId) -> childNodeId
    private int[] values; // can be reused after buildLz78TreeAndPrepareData

    // (nodeId - 1) -> methodId << 32 | parentNodeId
    private long[] outputData; // can be reused after writeTree:130!
    // nodeId -> childrenCount (stored negated; see appendChild)
    private int[] childrenCount; // reused by SynonymTable
    // nodeId -> parentNodeId << 32 | lengthToRoot; the two top bits serve as
    // "node used" / "node has used descendants" flags (see markNodeAsLastlyUsed)
    private long[] lengthToRoot;

    private int storageSize = 0;
    // Node 0 is the root, so the tree always contains at least one node.
    private int nodesCount = 1;

    public LzNodeTree() {
        keys = new long[INITIAL_CAPACITY];
        values = new int[INITIAL_CAPACITY];

        outputData = new long[INITIAL_CAPACITY / 2];
        childrenCount = new int[INITIAL_CAPACITY / 2];
        lengthToRoot = new long[INITIAL_CAPACITY / 2];
    }

    /**
     * Looks up the child of {@code parentNode} labeled {@code methodId}.
     * Returns the existing child's id, or inserts a new node and returns 0
     * (0 presumably signals the caller to restart matching from the root, as
     * in classic LZ78 — TODO confirm against the driver).
     */
    public int appendChild(int parentNode, int methodId) {
        long method = (long) methodId << 32;
        long key = method | parentNode;

        // Linear probing; a slot holding 0 means "empty".
        int mask = keys.length - 1;
        int i = hashCode(key) & mask;
        while (true) {
            long k = keys[i];
            if (k == 0) {
                break;
            }
            if (k == key) {
                return values[i];
            }
            i = (i + 1) & mask;
        }

        // Grow the per-node arrays by 1.5x when full.
        if (nodesCount >= outputData.length) {
            outputData = Arrays.copyOf(outputData, nodesCount + nodesCount / 2);
            childrenCount = Arrays.copyOf(childrenCount, nodesCount + nodesCount / 2);
            lengthToRoot = Arrays.copyOf(lengthToRoot, nodesCount + nodesCount / 2);
        }

        // Low 32 bits: depth = parent's depth + 1 (the (int) cast keeps only
        // the parent's depth, dropping its parent-id and flag bits);
        // high 32 bits: parent id.
        lengthToRoot[nodesCount] = ((int) lengthToRoot[parentNode] + 1) | ((long) parentNode << 32);
        outputData[nodesCount - 1] = key;
        keys[i] = key;
        values[i] = nodesCount;

        // Keep the hash-table load factor below 0.5.
        if (nodesCount * 2 > keys.length) {
            resize(keys.length * 2);
        }
        nodesCount++;

        childrenCount[parentNode]--; // negation for better sort

        return 0;
    }

    /** Raw per-node data: (nodeId - 1) -> methodId << 32 | parentNodeId. */
    public long[] treeData() {
        return outputData;
    }

    /** Number of meaningful entries in {@link #treeData()} (root excluded). */
    public int treeDataSize() {
        return nodesCount - 1;
    }

    /** Low 32 bits of a tree element hold the parent node id. */
    public int extractParentId(long treeElement) {
        return (int) treeElement;
    }

    /** High 32 bits of a tree element hold the method id. */
    public int extractMethodId(long treeElement) {
        return (int) (treeElement >>> 32);
    }

    /**
     * Flags {@code nodeId} as used (top bit of its lengthToRoot entry) and
     * every ancestor as used-with-used-descendants (two top bits). The climb
     * stops at the root or at the first ancestor whose pre-update entry was
     * already flagged (its extracted parent field reads non-positive).
     */
    public void markNodeAsLastlyUsed(int nodeId) {
        long ltr = lengthToRoot[nodeId];
        int parent = (int) (ltr >>> 32);
        // parent < 0 means the top flag bit is already set: marked before.
        if (parent >= 0) {
            lengthToRoot[nodeId] = ltr | 0x8000000000000000L;
            do {
                ltr = lengthToRoot[parent];
                lengthToRoot[parent] = ltr | 0xC000000000000000L;
                // Extracted from the pre-update value: negative once flags
                // were already present, which terminates the loop.
                parent = (int) (ltr >>> 32);
            } while (parent > 0);
        }
    }

    // destroys values
    /**
     * Drops nodes never flagged by {@link #markNodeAsLastlyUsed} and
     * renumbers the survivors densely, rewriting outputData/childrenCount in
     * place. Also maps remapAsWell[fromIndex..toIndex) through the
     * old-id -> new-id table. Reuses the values array as scratch (destroys it).
     */
    public void compactTree(int[] remapAsWell, int fromIndex, int toIndex) {
        int[] mappings = values;
        mappings[0] = 0; // root keeps id 0
        int nodes = 1;
        int storageSize = 0;
        for (int oldNodeID = 1; oldNodeID < nodesCount; oldNodeID++) {
            long ltr = lengthToRoot[oldNodeID];
            if (ltr >= 0) {
                // unused
                continue;
            }
            // Used node without used descendants: its depth contributes to
            // the total storage size — presumably the summed length of the
            // encoded chains; confirm with the writer.
            if ((ltr & 0x4000000000000000L) == 0) {
                storageSize += (int) ltr;
            }
            mappings[oldNodeID] = nodes;
            childrenCount[nodes] = childrenCount[oldNodeID];
            long out = outputData[oldNodeID - 1];
            long outMethod = 0xFFFFFFFF00000000L & out;
            int oldParent = (int) out;
            // Parents have smaller ids than children, so mappings[oldParent]
            // is already final at this point.
            outputData[nodes - 1] = outMethod | mappings[oldParent];
            nodes++;
        }
        for (int i = fromIndex; i < toIndex; i++) {
            remapAsWell[i] = mappings[remapAsWell[i]];
        }
        this.storageSize = storageSize;
        this.nodesCount = nodes;
    }

    // destroys keys and childrenCount arrays
    public SynonymTable extractSynonymTable() {
        return new SynonymTable(keys, childrenCount, nodesCount);
    }

    public int storageSize() {
        return storageSize;
    }

    public int nodesCount() {
        return nodesCount;
    }

    /** Rehashes every occupied slot into a table of newCapacity (power of two). */
    private void resize(int newCapacity) {
        long[] newKeys = new long[newCapacity];
        int[] newValues = new int[newCapacity];
        int mask = newKeys.length - 1;

        for (int i = 0; i < keys.length; i++) {
            if (keys[i] != 0) {
                for (int j = hashCode(keys[i]) & mask; ; j = (j + 1) & mask) {
                    if (newKeys[j] == 0) {
                        newKeys[j] = keys[i];
                        newValues[j] = values[i];
                        break;
                    }
                }
            }
        }

        keys = newKeys;
        values = newValues;
    }

    // 64->32-bit mix: multiply by the MurmurHash2 constant, then fold the
    // halves together.
    private static int hashCode(long key) {
        key *= 0xc6a4a7935bd1e995L;
        return (int) (key ^ (key >>> 32));
    }
}
|
||||
56
src/converter/one/heatmap/Method.java
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import one.convert.Frame;
|
||||
|
||||
public class Method {
|
||||
|
||||
public static final Method EMPTY = new Method(0, 0, -1, (byte) 0, false);
|
||||
|
||||
public final int className;
|
||||
public final int methodName;
|
||||
public final int location;
|
||||
public final byte type;
|
||||
public final boolean start;
|
||||
|
||||
public int frequency;
|
||||
// An identifier based on frequency ordering, more frequent methods will get a lower ID
|
||||
public int frequencyBasedId;
|
||||
public int index;
|
||||
|
||||
Method(int className, int methodName, int location, byte type, boolean start) {
|
||||
this.className = className;
|
||||
this.methodName = methodName;
|
||||
this.location = location;
|
||||
this.type = type;
|
||||
this.start = start;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Method method = (Method) o;
|
||||
|
||||
if (className != method.className) return false;
|
||||
if (methodName != method.methodName) return false;
|
||||
if (location != method.location) return false;
|
||||
if (type != method.type) return false;
|
||||
return start == method.start;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = className;
|
||||
result = 31 * result + methodName;
|
||||
result = 31 * result + location;
|
||||
result = 31 * result + (int) type;
|
||||
result = 31 * result + (start ? 1 : 0);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
94
src/converter/one/heatmap/SampleList.java
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright The async-profiler authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package one.heatmap;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
 * Accumulates (time block, stack id) samples packed into longs, then groups
 * them into per-block runs ordered by block and stack id.
 */
public class SampleList {

    private static final int DEFAULT_SAMPLES_COUNT = 10_000_000;

    // Width of one time bucket, in milliseconds.
    private final long blockDurationMs;

    // highest 32 bits for time block index, lowest 32 bits for stack id
    private long[] data = new long[DEFAULT_SAMPLES_COUNT];

    // Timestamp of the first sample; doubles as the "initialized" flag, so a
    // genuine first timestamp of 0 would be treated as uninitialized —
    // NOTE(review): presumably timestamps are epoch millis and never 0.
    private long initialTime = 0;
    private int recordsCount = 0;

    public SampleList(long blockDurationMs) {
        this.blockDurationMs = blockDurationMs;
    }

    /**
     * Records one sample. The very first sample defines the time origin and
     * implicitly lands in block 0 (only the stack id is stored).
     */
    public void add(int stackId, long timeMs) {
        if (initialTime == 0) {
            initialTime = timeMs;
            data[recordsCount++] = stackId;
            return;
        }
        // Grow by 1.5x when full.
        if (recordsCount >= data.length) {
            data = Arrays.copyOf(data, data.length * 3 / 2);
        }

        int currentTimeBlock = (int) ((timeMs - initialTime) / blockDurationMs);
        data[recordsCount++] = (long) currentTimeBlock << 32 | stackId;
    }

    /**
     * Sorts samples by (block, stack id) and splits them into per-block runs:
     * blockSizes[i] is the number of samples in block firstBlockId + i
     * (empty intermediate blocks get size 0), and stackIds holds every stack
     * id in that order, shifted down by one (ids are presumably stored
     * 1-based — TODO confirm with the producer).
     * NOTE(review): assumes at least one sample was added; recordsCount == 0
     * would index data[-1].
     */
    public Result samples() {
        Arrays.sort(data, 0, recordsCount);

        int firstBlockId = (int) (data[0] >> 32);
        int lastBlockId = (int) (data[recordsCount - 1] >> 32);

        int blocksCount = lastBlockId - firstBlockId + 1;

        int[] blockSizes = new int[blocksCount];
        int[] stackIds = new int[recordsCount];

        int stackIdsPos = 0;
        int currentBlockIndex = 0;
        int currentBlockSize = 0;
        int currentBlockId = firstBlockId;

        outer:
        while (stackIdsPos < stackIds.length) {
            long currentData = data[stackIdsPos];
            int blockId = (int) (currentData >> 32);
            // Flush finished (possibly empty) blocks until we reach the
            // block this sample belongs to.
            while (currentBlockId != blockId) {
                blockSizes[currentBlockIndex++] = currentBlockSize;
                currentBlockSize = 0;
                currentBlockId++;
                if (currentBlockId > lastBlockId) {
                    break outer;
                }
            }

            currentBlockSize++;
            stackIds[stackIdsPos++] = (int) currentData - 1; // ids presumably stored 1-based
        }

        // Flush the last, still-open block.
        if (currentBlockId <= lastBlockId) {
            blockSizes[currentBlockIndex] = currentBlockSize;
        }

        return new Result(blockSizes, stackIds);
    }

    public int getRecordsCount() {
        return recordsCount;
    }

    /** Pair of per-block run sizes and the flattened stack-id sequence. */
    public static class Result {
        public final int[] blockSizes;
        public final int[] stackIds;

        public Result(int[] blockSizes, int[] stackIds) {
            this.blockSizes = blockSizes;
            this.stackIds = stackIds;
        }
    }

}
|
||||