6.0
2024-05-02T18:35:25Z
7df96b18c230490a9a0a9e2307226338
Templates
47d3c2ff933947368d4bee4b1184d69b
ZFS on Linux
ZFS on Linux
OpenZFS (formerly ZFS on Linux) template.
Home of the project: https://github.com/Cosium/zabbix_zfs-on-linux
Templates
-
4ecabdcb2104460f83c2ad5f18fd98f9
OpenZFS version
vfs.file.contents[/sys/module/zfs/version]
1h
30d
0
TEXT
Application
ZFS
041efc8ff1ac40ed99953a8929ed3ff3
(last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#1)<>last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#2))>0
Version of OpenZFS is now {ITEM.VALUE} on {HOST.NAME}
INFO
-
6b5fc935fe194d30badea64eaf3f317f
ZFS ARC stat arc_dnode_limit
zfs.arcstats[arc_dnode_limit]
30d
B
Application
ZFS
Application
ZFS ARC
-
0b7d673688e3429d92aa349762729f83
ZFS ARC stat arc_meta_limit
zfs.arcstats[arc_meta_limit]
30d
B
Application
ZFS
Application
ZFS ARC
-
b0b5004458494182bf874545f8eb4e41
ZFS ARC stat arc_meta_used
zfs.arcstats[arc_meta_used]
30d
B
arc_meta_used = hdr_size + metadata_size + dbuf_size + dnode_size + bonus_size
Application
ZFS
Application
ZFS ARC
-
795ab079ba13461c872ee1d5c0295704
ZFS ARC stat bonus_size
zfs.arcstats[bonus_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
34a1fb79b2b64ce08ec5b377211372d7
ZFS ARC max size
zfs.arcstats[c_max]
30d
B
Application
ZFS
Application
ZFS ARC
-
d60b8e4f7a3d4bea972e7fe04c3bb5ca
ZFS ARC minimum size
zfs.arcstats[c_min]
30d
B
Application
ZFS
Application
ZFS ARC
-
5e12dd98f1644f5a87cc5ded5d2e55d8
ZFS ARC stat data_size
zfs.arcstats[data_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
522a0f33c90047bab4f55b7214f51dea
ZFS ARC stat dbuf_size
zfs.arcstats[dbuf_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
a3d10ebb57984a829f780a229fc9617c
ZFS ARC stat dnode_size
zfs.arcstats[dnode_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
184eef57aa034cf8acaf6a8f0e02395b
ZFS ARC stat hdr_size
zfs.arcstats[hdr_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
cb7bcc02dfc14329a361e194145871c0
ZFS ARC stat hits
zfs.arcstats[hits]
30d
CHANGE_PER_SECOND
Application
ZFS
Application
ZFS ARC
-
8df273b6e0904c9ab140f8f13f6ca973
ZFS ARC stat metadata_size
zfs.arcstats[metadata_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
dcd96743ed984018bff5d16105693606
ZFS ARC stat mfu_hits
zfs.arcstats[mfu_hits]
30d
CHANGE_PER_SECOND
Application
ZFS
Application
ZFS ARC
-
1015ebe8ef6f4626ae7967bf6358f1b3
ZFS ARC stat mfu_size
zfs.arcstats[mfu_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
1298a265a6784e63a166b768e1faf67e
ZFS ARC stat misses
zfs.arcstats[misses]
30d
CHANGE_PER_SECOND
Application
ZFS
Application
ZFS ARC
-
c85d0e9e1b464748a20148e2f2507609
ZFS ARC stat mru_hits
zfs.arcstats[mru_hits]
30d
CHANGE_PER_SECOND
Application
ZFS
Application
ZFS ARC
-
50954c7b43d745d09990011df4d7448c
ZFS ARC stat mru_size
zfs.arcstats[mru_size]
30d
B
Application
ZFS
Application
ZFS ARC
-
cd225da5a02346a58dbe0c9808a628eb
ZFS ARC current size
zfs.arcstats[size]
30d
B
Application
ZFS
Application
ZFS ARC
-
8c8129f814fe47ae9c71e636599acd90
ZFS ARC Cache Hit Ratio
CALCULATED
zfs.arcstats_hit_ratio
30d
FLOAT
%
100*(last(//zfs.arcstats[hits])/(last(//zfs.arcstats[hits])+count(//zfs.arcstats[hits],#1,,"0")+last(//zfs.arcstats[misses])))
Application
ZFS
Application
ZFS ARC
-
e644390a9c9743f2844dbc9ef8806a8f
ZFS ARC total read
CALCULATED
zfs.arcstats_total_read
30d
B
last(//zfs.arcstats[hits])+last(//zfs.arcstats[misses])
Application
ZFS
Application
ZFS ARC
-
ebfb742fb123451c9632d12bde0957c4
ZFS parameter zfs_arc_dnode_limit_percent
zfs.get.param[zfs_arc_dnode_limit_percent]
1h
30d
%
Application
ZFS
Application
ZFS ARC
-
18d8b817852848929f4e0b421cb21532
ZFS parameter zfs_arc_meta_limit_percent
zfs.get.param[zfs_arc_meta_limit_percent]
1h
30d
%
Application
ZFS
Application
ZFS ARC
-
194b7c100d124710ac6ec994c154ab36
ZFS Uptime
zfs.uptime
FLOAT
seconds
Needed for iostats
Application
ZFS
a82a1b7067904fecb06bcf5b88457192
Zfs Dataset discovery
zfs.fileset.discovery
30m
2d
Discovers ZFS datasets.
4d7c96bd10b44754b2c8790b90c12046
Zfs dataset {#FILESETNAME} compressratio
zfs.get.compressratio[{#FILESETNAME}]
30m
30d
FLOAT
%
MULTIPLIER
100
Application
ZFS
Application
ZFS dataset
e9df401ae71e45c8a3fdbbd146cdd57b
Zfs dataset {#FILESETNAME} available
zfs.get.fsinfo[{#FILESETNAME},available]
5m
30d
B
Application
ZFS
Application
ZFS dataset
ed63bb6942364281bcea80c54b6f8fcc
Zfs dataset {#FILESETNAME} referenced
zfs.get.fsinfo[{#FILESETNAME},referenced]
5m
30d
B
Application
ZFS
Application
ZFS dataset
7ef4530ddf464defb2a64ce674a82c8c
Zfs dataset {#FILESETNAME} usedbychildren
zfs.get.fsinfo[{#FILESETNAME},usedbychildren]
5m
30d
B
Application
ZFS
Application
ZFS dataset
3c7f982147be49629c78aa67a1d8d56e
Zfs dataset {#FILESETNAME} usedbydataset
zfs.get.fsinfo[{#FILESETNAME},usedbydataset]
1h
30d
B
Application
ZFS
Application
ZFS dataset
cc0e02c58b28443eb78eeacc81095966
Zfs dataset {#FILESETNAME} usedbysnapshots
zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]
5m
30d
B
Application
ZFS
Application
ZFS dataset
a54feffafdb34ba08f1474ab4710088d
Zfs dataset {#FILESETNAME} used
zfs.get.fsinfo[{#FILESETNAME},used]
5m
30d
B
Application
ZFS
Application
ZFS dataset
cc0b0756d2fe42779b62adf63e38681d
( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_AVERAGE_ALERT}/100)
More than {$ZFS_AVERAGE_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
AVERAGE
More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_HIGH_ALERT}/100)
8bfb157ac42845c0b340e28ae510833c
( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)
More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
DISASTER
9b592a2cba084bec9ceb4f82367e758b
( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_HIGH_ALERT}/100)
More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
HIGH
More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)
5213684719404718b8956d6faf0e6b71
ZFS dataset {#FILESETNAME} usage
STACKED
FIXED
1
3333FF
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbydataset]
2
FF33FF
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]
3
FF3333
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbychildren]
4
33FF33
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},available]
08039e570bd7417294d043f4f7bf960f
Zfs Pool discovery
zfs.pool.discovery
1h
3d
9f889e9529934fdfbf47a29de32468f0
Zpool {#POOLNAME} available
zfs.get.fsinfo[{#POOLNAME},available]
5m
30d
B
Application
ZFS
Application
ZFS zpool
1993c04b00bc428bbdf43c909967afd2
Zpool {#POOLNAME} used
zfs.get.fsinfo[{#POOLNAME},used]
5m
30d
B
Application
ZFS
Application
ZFS zpool
472e21c79759476984cbf4ce9f12580a
Zpool {#POOLNAME} Health
zfs.zpool.health[{#POOLNAME}]
5m
30d
0
TEXT
Application
ZFS
Application
ZFS zpool
4855fe0ed61b444daad73aa6090b46af
find(/ZFS on Linux/zfs.zpool.health[{#POOLNAME}],,"like","ONLINE")=0
Zpool {#POOLNAME} is {ITEM.VALUE} on {HOST.NAME}
HIGH
7c8c97e5e76a4ea3ba41613f1eee4daf
Zpool {#POOLNAME} read throughput
CALCULATED
zfs.zpool.iostat.nread[{#POOLNAME}]
Bps
round(last(//zfs.zpool.iostat.nread_avg[{#POOLNAME}])*last(//zfs.uptime),0)
CHANGE_PER_SECOND
3207e6ffd0fa40c4a1d6e607e4e12375
Zpool {#POOLNAME} AVG since boot read throughput
DEPENDENT
zfs.zpool.iostat.nread_avg[{#POOLNAME}]
0
30d
Bps
JSONPATH
$[2]
zfs.zpool.iostats[{#POOLNAME}]
Application
ZFS
Application
ZFS zpool
e7567b2eacd04b8c9602e30d1245c318
Zpool {#POOLNAME} write throughput
CALCULATED
zfs.zpool.iostat.nwritten[{#POOLNAME}]
Bps
round(last(//zfs.zpool.iostat.nwritten_avg[{#POOLNAME}])*last(//zfs.uptime),0)
CHANGE_PER_SECOND
78b418605f9b45b29bbd33b93a6b2e82
Zpool {#POOLNAME} AVG since boot write throughput
DEPENDENT
zfs.zpool.iostat.nwritten_avg[{#POOLNAME}]
0
30d
Bps
JSONPATH
$[3]
zfs.zpool.iostats[{#POOLNAME}]
Application
ZFS
Application
ZFS zpool
2be4764e02bf4b518850f1a46abc07e6
Zpool {#POOLNAME} IOPS: reads
CALCULATED
zfs.zpool.iostat.reads[{#POOLNAME}]
iops
round(last(//zfs.zpool.iostat.reads_avg[{#POOLNAME}])*last(//zfs.uptime),0)
CHANGE_PER_SECOND
6b35bf06bf4542318a7999ac4d7952f7
Zpool {#POOLNAME} IOPS AVG since boot: reads
DEPENDENT
zfs.zpool.iostat.reads_avg[{#POOLNAME}]
0
30d
iops
JSONPATH
$[0]
zfs.zpool.iostats[{#POOLNAME}]
Application
ZFS
Application
ZFS zpool
ba9be67e02ea41aead4cb3474b606369
Zpool {#POOLNAME} IOPS: writes
CALCULATED
zfs.zpool.iostat.writes[{#POOLNAME}]
iops
round(last(//zfs.zpool.iostat.writes_avg[{#POOLNAME}])*last(//zfs.uptime),0)
CHANGE_PER_SECOND
b99d5ab922324536bc2e013ac1fca306
Zpool {#POOLNAME} IOPS AVG since boot: writes
DEPENDENT
zfs.zpool.iostat.writes_avg[{#POOLNAME}]
0
30d
iops
JSONPATH
$[1]
zfs.zpool.iostats[{#POOLNAME}]
Application
ZFS
Application
ZFS zpool
5bb6b079ec63416c8fbd3c9bfa9e06da
Zpool {#POOLNAME}: Get zpool iostats
zfs.zpool.iostats[{#POOLNAME}]
0
0
TEXT
REGEX
(\w+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+).*$
["\4", "\5", "\6", "\7"]
Application
ZFS
Application
ZFS zpool
867075d6eb1743069be868007472192b
Zpool {#POOLNAME} scrub status
zfs.zpool.scrub[{#POOLNAME}]
5m
30d
Detects whether the pool is currently scrubbing itself.
Scrubbing is not a bad thing in itself, but it slows down the entire pool; on a production server it should be stopped during business hours if it causes a noticeable slowdown.
ZFS zpool scrub status
Application
ZFS
Application
ZFS zpool
792be07c555c4ae6a9819d69d332357b
max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],12h)=0
Zpool {#POOLNAME} is scrubbing for more than 12h on {HOST.NAME}
AVERAGE
Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}
max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0
04cac9633f164227b1f9b2fe26923609
max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0
Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}
HIGH
82fce07b30114c7e8645689317e2c1b4
( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_AVERAGE_ALERT}/100)
More than {$ZPOOL_AVERAGE_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
AVERAGE
More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)
ab56a2a8eb3d4b4294707e2a8aa94e22
( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)
More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
DISASTER
c9c22e6617af4ad09970d2988c4a7fe7
( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)
More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
HIGH
More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)
926abae3e18144f0899711fdfd16e808
ZFS zpool {#POOLNAME} IOPS
FIXED
1
5C6BC0
-
ZFS on Linux
zfs.zpool.iostat.reads_avg[{#POOLNAME}]
2
EF5350
-
ZFS on Linux
zfs.zpool.iostat.writes_avg[{#POOLNAME}]
63ae2d7acd4d4d15b4c5e7a5a90a063a
ZFS zpool {#POOLNAME} space usage
STACKED
1
00EE00
-
ZFS on Linux
zfs.get.fsinfo[{#POOLNAME},available]
2
EE0000
-
ZFS on Linux
zfs.get.fsinfo[{#POOLNAME},used]
aa35d164bacd45c5983fd2856781da88
ZFS zpool {#POOLNAME} throughput
FIXED
1
5C6BC0
-
ZFS on Linux
zfs.zpool.iostat.nread_avg[{#POOLNAME}]
2
BOLD_LINE
EF5350
-
ZFS on Linux
zfs.zpool.iostat.nwritten_avg[{#POOLNAME}]
6c96e092f08f4b98af9a377782180689
Zfs vdev discovery
zfs.vdev.discovery
1h
3d
9f63161726774a28905c87aac92cf1e9
vdev {#VDEV}: CHECKSUM error counter
zfs.vdev.error_counter.cksum[{#VDEV}]
5m
30d
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
Application
ZFS
Application
ZFS vdev
48a02eb060fd4b73bdde08a2795c4717
vdev {#VDEV}: READ error counter
zfs.vdev.error_counter.read[{#VDEV}]
5m
30d
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
Application
ZFS
Application
ZFS vdev
15953ba38fde4b8c8681955a27d9204a
vdev {#VDEV}: WRITE error counter
zfs.vdev.error_counter.write[{#VDEV}]
5m
30d
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
Application
ZFS
Application
ZFS vdev
3e64a59d2a154a89a3bc43483942302d
vdev {#VDEV}: total number of errors
CALCULATED
zfs.vdev.error_total[{#VDEV}]
5m
30d
last(//zfs.vdev.error_counter.cksum[{#VDEV}])+last(//zfs.vdev.error_counter.read[{#VDEV}])+last(//zfs.vdev.error_counter.write[{#VDEV}])
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
Application
ZFS
Application
ZFS vdev
44f7667c275d4a04891bc4f1d00e668b
last(/ZFS on Linux/zfs.vdev.error_total[{#VDEV}])>0
vdev {#VDEV} has encountered {ITEM.VALUE} errors on {HOST.NAME}
HIGH
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
You may also run a zpool scrub to check if some other undetected errors are present on this vdev.
ab78dba991ba4311a04740fc69b30381
ZFS vdev {#VDEV} errors
FIXED
CC00CC
-
ZFS on Linux
zfs.vdev.error_counter.cksum[{#VDEV}]
1
F63100
-
ZFS on Linux
zfs.vdev.error_counter.read[{#VDEV}]
2
BBBB00
-
ZFS on Linux
zfs.vdev.error_counter.write[{#VDEV}]
{$ZFS_ARC_META_ALERT}
90
{$ZFS_AVERAGE_ALERT}
90
{$ZFS_DISASTER_ALERT}
99
{$ZFS_HIGH_ALERT}
95
{$ZPOOL_AVERAGE_ALERT}
85
{$ZPOOL_DISASTER_ALERT}
99
{$ZPOOL_HIGH_ALERT}
90
180e8c0dc05946e4b8552e3a01df347f
ZFS ARC
GRAPH_CLASSIC
24
5
GRAPH
graphid
ZFS on Linux
ZFS ARC memory usage
INTEGER
source_type
0
GRAPH_CLASSIC
5
24
5
GRAPH
graphid
ZFS on Linux
ZFS ARC Cache Hit Ratio
INTEGER
source_type
0
GRAPH_CLASSIC
10
24
5
GRAPH
graphid
ZFS on Linux
ZFS ARC breakdown
INTEGER
source_type
0
GRAPH_CLASSIC
15
24
5
GRAPH
graphid
ZFS on Linux
ZFS ARC arc_meta_used breakdown
INTEGER
source_type
0
442dda5c36c04fc78c3a73eacf26bc7f
ZFS zpools
GRAPH_PROTOTYPE
8
5
INTEGER
columns
1
GRAPH_PROTOTYPE
graphid
ZFS on Linux
ZFS zpool {#POOLNAME} IOPS
INTEGER
rows
1
INTEGER
source_type
2
GRAPH_PROTOTYPE
8
8
5
INTEGER
columns
1
GRAPH_PROTOTYPE
graphid
ZFS on Linux
ZFS zpool {#POOLNAME} throughput
INTEGER
rows
1
INTEGER
source_type
2
GRAPH_PROTOTYPE
16
8
5
INTEGER
columns
1
GRAPH_PROTOTYPE
graphid
ZFS on Linux
ZFS zpool {#POOLNAME} space usage
INTEGER
rows
1
INTEGER
source_type
2
d1d7b0898d06481dbcec8b02d915fb1c
ZFS zpool scrub status
0
Scrub in progress
1
No scrub in progress
1daac44b853b4b6da767c9c3af96b774
last(/ZFS on Linux/zfs.arcstats[dnode_size])>(last(/ZFS on Linux/zfs.arcstats[arc_dnode_limit])*0.9)
ZFS ARC dnode size > 90% dnode max size on {HOST.NAME}
HIGH
69c18b7ceb3d4da2bda0e05f9a12453f
last(/ZFS on Linux/zfs.arcstats[arc_meta_used])>(last(/ZFS on Linux/zfs.arcstats[arc_meta_limit])*0.01*{$ZFS_ARC_META_ALERT})
ZFS ARC meta size > {$ZFS_ARC_META_ALERT}% meta max size on {HOST.NAME}
HIGH
1510111dc5414e6d80a5230ce6a81f1d
ZFS ARC arc_meta_used breakdown
STACKED
FIXED
3333FF
-
ZFS on Linux
zfs.arcstats[metadata_size]
1
00EE00
-
ZFS on Linux
zfs.arcstats[dnode_size]
2
EE0000
-
ZFS on Linux
zfs.arcstats[hdr_size]
3
EEEE00
-
ZFS on Linux
zfs.arcstats[dbuf_size]
4
EE00EE
-
ZFS on Linux
zfs.arcstats[bonus_size]
203eeeaadc9444ccbbc31cf043e836cb
ZFS ARC breakdown
STACKED
FIXED
3333FF
-
ZFS on Linux
zfs.arcstats[data_size]
1
00AA00
-
ZFS on Linux
zfs.arcstats[metadata_size]
2
EE0000
-
ZFS on Linux
zfs.arcstats[dnode_size]
3
CCCC00
-
ZFS on Linux
zfs.arcstats[hdr_size]
4
A54F10
-
ZFS on Linux
zfs.arcstats[dbuf_size]
5
888888
-
ZFS on Linux
zfs.arcstats[bonus_size]
4c493303be4a45a7a96d3ef7246843c0
ZFS ARC Cache Hit Ratio
FIXED
FIXED
00CC00
-
ZFS on Linux
zfs.arcstats_hit_ratio
b2fce9515a7d4218a5e9015f212c2a60
ZFS ARC memory usage
FIXED
ITEM
ZFS on Linux
zfs.arcstats[c_max]
GRADIENT_LINE
0000EE
-
ZFS on Linux
zfs.arcstats[size]
1
BOLD_LINE
DD0000
-
ZFS on Linux
zfs.arcstats[c_max]
2
00BB00
-
ZFS on Linux
zfs.arcstats[c_min]