4.0
2021-01-04T21:27:59Z
Templates
ZFS on Linux
ZFS on Linux
OpenZFS (formerly ZFS on Linux) template.
Home of the project: https://github.com/Cosium/zabbix_zfs-on-linux
Templates
ZFS
ZFS ARC
ZFS dataset
ZFS vdev
ZFS zpool
-
OpenZFS version
7
vfs.file.contents[/sys/module/zfs/version]
1h
30d
0
0
4
0
0
0
0
0
ZFS
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[arc_dnode_limit]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[arc_meta_limit]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[arc_meta_used]
1m
30d
365d
0
3
B
0
0
0
0
arc_meta_used = hdr_size + metadata_size + dbuf_size + dnode_size + bonus_size
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[bonus_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC max size
7
zfs.arcstats[c_max]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC minimum size
7
zfs.arcstats[c_min]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[data_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[dbuf_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[dnode_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[hdr_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[hits]
1m
30d
365d
0
3
0
0
0
0
0
ZFS
ZFS ARC
10
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[metadata_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[mfu_hits]
1m
30d
365d
0
3
0
0
0
0
0
ZFS
ZFS ARC
10
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[mfu_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[misses]
1m
30d
365d
0
3
0
0
0
0
0
ZFS
ZFS ARC
10
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[mru_hits]
1m
30d
365d
0
3
0
0
0
0
0
ZFS
ZFS ARC
10
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC stat "$1"
7
zfs.arcstats[mru_size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC current size
7
zfs.arcstats[size]
1m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC Cache Hit Ratio
15
zfs.arcstats_hit_ratio
1m
30d
365d
0
0
%
0
0
0
100*(last(zfs.arcstats[hits])/(last(zfs.arcstats[hits])+count(zfs.arcstats[hits],#1,0)+last(zfs.arcstats[misses])))
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS ARC total read
15
zfs.arcstats_total_read
1m
30d
365d
0
3
B
0
0
0
last(zfs.arcstats[hits])+last(zfs.arcstats[misses])
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS parameter $1
7
zfs.get.param[zfs_arc_dnode_limit_percent]
1h
30d
365d
0
3
%
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
-
ZFS parameter $1
7
zfs.get.param[zfs_arc_meta_limit_percent]
1h
30d
365d
0
3
%
0
0
0
0
0
ZFS
ZFS ARC
3s
200
1
0
0
0
0
0
0
0
Zfs Dataset discovery
7
zfs.fileset.discovery
30m
0
0
0
0
0
1
2d
Discovers ZFS datasets.
Zfs dataset $1 compressratio
7
zfs.get.compressratio[{#FILESETNAME}]
30m
30d
365d
0
0
%
0
0
0
0
0
ZFS
ZFS dataset
1
100
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},available]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},referenced]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},usedbychildren]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},usedbydataset]
1h
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
Zfs dataset $1 $2
7
zfs.get.fsinfo[{#FILESETNAME},used]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS dataset
3s
200
1
0
0
0
0
0
0
0
( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} ) ) > ({$ZFS_AVERAGE_ALERT}/100)
0
More than {$ZFS_AVERAGE_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
0
0
3
0
0
More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} ) ) > ({$ZFS_HIGH_ALERT}/100)
( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} ) ) > ({$ZFS_DISASTER_ALERT}/100)
0
More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
0
0
5
0
0
( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} ) ) > ({$ZFS_HIGH_ALERT}/100)
0
More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
0
0
4
0
0
More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}
( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#FILESETNAME},used].last()} ) ) > ({$ZFS_DISASTER_ALERT}/100)
ZFS dataset {#FILESETNAME} usage
900
200
0.0000
100.0000
1
1
1
1
0
0.0000
0.0000
1
0
0
0
1
0
3333FF
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbydataset]
2
0
FF33FF
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]
3
0
FF3333
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},usedbychildren]
4
0
33FF33
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#FILESETNAME},available]
3s
200
1
0
0
0
0
0
0
Zfs Pool discovery
7
zfs.pool.discovery
1h
0
0
0
0
0
0
3d
Zpool {#POOLNAME}: Get iostats
7
vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]
1m
0
0
0
4
0
0
0
0
0
ZFS
ZFS zpool
5
([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+).*$
["\1", "\2", "\3", "\4"]
3s
200
1
0
0
0
0
0
0
0
Zpool {#POOLNAME} available
7
zfs.get.fsinfo[{#POOLNAME},available]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS zpool
3s
200
1
0
0
0
0
0
0
0
Zpool {#POOLNAME} used
7
zfs.get.fsinfo[{#POOLNAME},used]
5m
30d
365d
0
3
B
0
0
0
0
0
ZFS
ZFS zpool
3s
200
1
0
0
0
0
0
0
0
Zpool {#POOLNAME} Health
7
zfs.zpool.health[{#POOLNAME}]
5m
30d
0
0
4
0
0
0
0
0
ZFS
ZFS zpool
3s
200
1
0
0
0
0
0
0
0
Zpool {#POOLNAME} read throughput
18
zfs.zpool.iostat.nread[{#POOLNAME}]
0
30d
365d
0
0
Bps
0
0
0
0
0
ZFS
ZFS zpool
12
$[0]
10
3s
200
1
0
0
0
0
0
0
0
vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]
Zpool {#POOLNAME} write throughput
18
zfs.zpool.iostat.nwritten[{#POOLNAME}]
0
30d
365d
0
0
Bps
0
0
0
0
0
ZFS
ZFS zpool
12
$[1]
10
3s
200
1
0
0
0
0
0
0
0
vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]
Zpool {#POOLNAME} IOPS: reads
18
zfs.zpool.iostat.reads[{#POOLNAME}]
0
30d
365d
0
0
iops
0
0
0
0
0
ZFS
ZFS zpool
12
$[2]
10
3s
200
1
0
0
0
0
0
0
0
vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]
Zpool {#POOLNAME} IOPS: writes
18
zfs.zpool.iostat.writes[{#POOLNAME}]
0
30d
365d
0
0
iops
0
0
0
0
0
ZFS
ZFS zpool
12
$[3]
10
3s
200
1
0
0
0
0
0
0
0
vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]
Zpool {#POOLNAME} scrub status
7
zfs.zpool.scrub[{#POOLNAME}]
5m
30d
365d
0
3
0
0
0
0
Detect if the pool is currently scrubbing itself.
This is not a bad thing in itself, but it slows down the entire pool; if it causes a noticeable slowdown on a production server during business hours, the scrub should be terminated.
0
ZFS
ZFS zpool
ZFS zpool scrub status
3s
200
1
0
0
0
0
0
0
0
( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} ) ) > ({$ZPOOL_AVERAGE_ALERT}/100)
0
More than {$ZPOOL_AVERAGE_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
0
0
3
0
0
More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} ) ) > ({$ZPOOL_HIGH_ALERT}/100)
( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} ) ) > ({$ZPOOL_DISASTER_ALERT}/100)
0
More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
0
0
5
0
0
( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} ) ) > ({$ZPOOL_HIGH_ALERT}/100)
0
More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
0
0
4
0
0
More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}
( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} / ( {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},available].last()} + {ZFS on Linux:zfs.get.fsinfo[{#POOLNAME},used].last()} ) ) > ({$ZPOOL_DISASTER_ALERT}/100)
{ZFS on Linux:zfs.zpool.scrub[{#POOLNAME}].max(12h)}=0
0
Zpool {#POOLNAME} is scrubbing for more than 12h on {HOST.NAME}
0
0
3
0
0
Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}
{ZFS on Linux:zfs.zpool.scrub[{#POOLNAME}].max(24h)}=0
{ZFS on Linux:zfs.zpool.scrub[{#POOLNAME}].max(24h)}=0
0
Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}
0
0
4
0
0
{ZFS on Linux:zfs.zpool.health[{#POOLNAME}].str(ONLINE)}=0
0
Zpool {#POOLNAME} is {ITEM.VALUE} on {HOST.NAME}
0
0
4
0
0
ZFS zpool {#POOLNAME} IOPS
900
200
0.0000
100.0000
1
1
0
1
0
0.0000
0.0000
1
0
0
0
1
0
5C6BC0
0
2
0
-
ZFS on Linux
zfs.zpool.iostat.reads[{#POOLNAME}]
2
0
EF5350
0
2
0
-
ZFS on Linux
zfs.zpool.iostat.writes[{#POOLNAME}]
ZFS zpool {#POOLNAME} space usage
900
200
0.0000
100.0000
1
1
1
1
0
0.0000
0.0000
0
0
0
0
1
0
00EE00
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#POOLNAME},available]
2
0
EE0000
0
2
0
-
ZFS on Linux
zfs.get.fsinfo[{#POOLNAME},used]
ZFS zpool {#POOLNAME} throughput
900
200
0.0000
100.0000
1
1
0
1
0
0.0000
0.0000
1
0
0
0
1
0
5C6BC0
0
2
0
-
ZFS on Linux
zfs.zpool.iostat.nread[{#POOLNAME}]
2
2
EF5350
0
2
0
-
ZFS on Linux
zfs.zpool.iostat.nwritten[{#POOLNAME}]
3s
200
1
0
0
0
0
0
0
Zfs vdev discovery
7
zfs.vdev.discovery
1h
0
0
0
0
0
0
3d
vdev {#VDEV}: CHECKSUM error counter
7
zfs.vdev.error_counter.cksum[{#VDEV}]
5m
30d
365d
0
3
0
0
0
0
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
0
ZFS
ZFS vdev
3s
200
1
0
0
0
0
0
0
0
vdev {#VDEV}: READ error counter
7
zfs.vdev.error_counter.read[{#VDEV}]
5m
30d
365d
0
3
0
0
0
0
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
0
ZFS
ZFS vdev
3s
200
1
0
0
0
0
0
0
0
vdev {#VDEV}: WRITE error counter
7
zfs.vdev.error_counter.write[{#VDEV}]
5m
30d
365d
0
3
0
0
0
0
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
0
ZFS
ZFS vdev
3s
200
1
0
0
0
0
0
0
0
vdev {#VDEV}: total number of errors
15
zfs.vdev.error_total[{#VDEV}]
5m
30d
365d
0
3
0
0
0
last(zfs.vdev.error_counter.cksum[{#VDEV}])+last(zfs.vdev.error_counter.read[{#VDEV}])+last(zfs.vdev.error_counter.write[{#VDEV}])
0
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
0
ZFS
ZFS vdev
3s
200
1
0
0
0
0
0
0
0
{ZFS on Linux:zfs.vdev.error_total[{#VDEV}].last()}>0
0
vdev {#VDEV} has encountered {ITEM.VALUE} errors on {HOST.NAME}
0
0
4
This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
If yes, use 'zpool replace' to replace the device.
If not, clear the error with 'zpool clear'.
You may also run a zpool scrub to check if some other undetected errors are present on this vdev.
0
0
ZFS vdev {#VDEV} errors
900
200
0.0000
100.0000
1
1
0
1
0
0.0000
0.0000
1
0
0
0
0
0
CC00CC
0
2
0
-
ZFS on Linux
zfs.vdev.error_counter.cksum[{#VDEV}]
1
0
F63100
0
2
0
-
ZFS on Linux
zfs.vdev.error_counter.read[{#VDEV}]
2
0
BBBB00
0
2
0
-
ZFS on Linux
zfs.vdev.error_counter.write[{#VDEV}]
3s
200
1
0
0
0
0
0
0
{$ZFS_ARC_META_ALERT}
90
{$ZFS_AVERAGE_ALERT}
90
{$ZFS_DISASTER_ALERT}
99
{$ZFS_HIGH_ALERT}
95
{$ZPOOL_AVERAGE_ALERT}
85
{$ZPOOL_DISASTER_ALERT}
99
{$ZPOOL_HIGH_ALERT}
90
ZFS ARC
1
4
0
1500
150
0
0
1
1
0
0
0
0
0
ZFS ARC memory usage
ZFS on Linux
3
0
1500
150
0
1
1
1
0
0
0
0
0
ZFS ARC Cache Hit Ratio
ZFS on Linux
3
0
1500
150
0
2
1
1
0
0
0
0
0
ZFS ARC breakdown
ZFS on Linux
3
0
1500
150
0
3
1
1
0
0
0
0
0
ZFS ARC arc_meta_used breakdown
ZFS on Linux
3
ZFS zpools
3
1
20
400
100
0
0
1
1
0
0
0
0
0
ZFS zpool {#POOLNAME} IOPS
ZFS on Linux
1
20
400
100
1
0
1
1
0
0
0
0
0
ZFS zpool {#POOLNAME} throughput
ZFS on Linux
1
20
400
100
2
0
1
1
0
0
0
0
0
ZFS zpool {#POOLNAME} space usage
ZFS on Linux
1
{ZFS on Linux:vfs.file.contents[/sys/module/zfs/version].diff(0)}>0
0
Version of OpenZFS is now {ITEM.VALUE} on {HOST.NAME}
0
0
1
0
0
{ZFS on Linux:zfs.arcstats[dnode_size].last()}>({ZFS on Linux:zfs.arcstats[arc_dnode_limit].last()}*0.9)
0
ZFS ARC dnode size > 90% dnode max size on {HOST.NAME}
0
0
4
0
0
{ZFS on Linux:zfs.arcstats[arc_meta_used].last()}>({ZFS on Linux:zfs.arcstats[arc_meta_limit].last()}*0.01*{$ZFS_ARC_META_ALERT})
0
ZFS ARC meta size > {$ZFS_ARC_META_ALERT}% meta max size on {HOST.NAME}
0
0
4
0
0
ZFS ARC arc_meta_used breakdown
900
200
0.0000
100.0000
1
1
1
1
0
0.0000
0.0000
1
0
0
0
0
0
3333FF
0
2
0
-
ZFS on Linux
zfs.arcstats[metadata_size]
1
0
00EE00
0
2
0
-
ZFS on Linux
zfs.arcstats[dnode_size]
2
0
EE0000
0
2
0
-
ZFS on Linux
zfs.arcstats[hdr_size]
3
0
EEEE00
0
2
0
-
ZFS on Linux
zfs.arcstats[dbuf_size]
4
0
EE00EE
0
2
0
-
ZFS on Linux
zfs.arcstats[bonus_size]
ZFS ARC breakdown
900
200
0.0000
100.0000
1
1
1
1
0
0.0000
0.0000
1
0
0
0
0
0
3333FF
0
2
0
-
ZFS on Linux
zfs.arcstats[data_size]
1
0
00AA00
0
2
0
-
ZFS on Linux
zfs.arcstats[metadata_size]
2
0
EE0000
0
2
0
-
ZFS on Linux
zfs.arcstats[dnode_size]
3
0
CCCC00
0
2
0
-
ZFS on Linux
zfs.arcstats[hdr_size]
4
0
A54F10
0
2
0
-
ZFS on Linux
zfs.arcstats[dbuf_size]
5
0
888888
0
2
0
-
ZFS on Linux
zfs.arcstats[bonus_size]
ZFS ARC Cache Hit Ratio
900
200
0.0000
100.0000
1
1
0
1
0
0.0000
0.0000
1
1
0
0
0
0
00CC00
0
2
0
-
ZFS on Linux
zfs.arcstats_hit_ratio
ZFS ARC memory usage
900
200
0.0000
100.0000
1
1
0
1
0
0.0000
0.0000
1
2
0
ZFS on Linux
zfs.arcstats[c_max]
0
5
0000EE
0
2
0
-
ZFS on Linux
zfs.arcstats[size]
1
2
DD0000
0
2
0
-
ZFS on Linux
zfs.arcstats[c_max]
2
0
00BB00
0
2
0
-
ZFS on Linux
zfs.arcstats[c_min]
ZFS zpool scrub status
0
Scrub in progress
1
No scrub in progress