bcachefs (loop0): running explicit recovery pass check_alloc_info (13), currently at recovery_pass_empty (0)
error reading btree root btree=alloc level=0: btree_node_read_error, fixing
==================================================================
BUG: KASAN: use-after-free in poly1305_update_arch+0x2c4/0x380 arch/x86/crypto/poly1305_glue.c:198
Read of size 8 at addr ffff888050387050 by task syz-executor129/9582

CPU: 4 UID: 0 PID: 9582 Comm: syz-executor129 Not tainted 6.14.0-rc7-00205-g586de92313fc #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x116/0x1b0 lib/dump_stack.c:120
 print_address_description mm/kasan/report.c:408 [inline]
 print_report+0xc1/0x630 mm/kasan/report.c:521
 kasan_report+0xbd/0xf0 mm/kasan/report.c:634
 check_region_inline mm/kasan/generic.c:183 [inline]
 kasan_check_range+0xf4/0x1a0 mm/kasan/generic.c:189
 __asan_memcpy+0x24/0x60 mm/kasan/shadow.c:105
 poly1305_update_arch+0x2c4/0x380 arch/x86/crypto/poly1305_glue.c:198
 crypto_poly1305_update+0x25/0x30 arch/x86/crypto/poly1305_glue.c:230
 bch2_checksum+0x1da/0x2c0 fs/bcachefs/checksum.c:239
 bch2_btree_node_read_done+0x2894/0x4b80 fs/bcachefs/btree_io.c:1130
 btree_node_read_work+0x670/0x1050 fs/bcachefs/btree_io.c:1358
 bch2_btree_node_read+0x8ad/0xea0 fs/bcachefs/btree_io.c:1748
 __bch2_btree_root_read fs/bcachefs/btree_io.c:1789 [inline]
 bch2_btree_root_read+0x2c6/0x460 fs/bcachefs/btree_io.c:1811
 read_btree_roots fs/bcachefs/recovery.c:581 [inline]
 bch2_fs_recovery+0x222b/0x4340 fs/bcachefs/recovery.c:928
 bch2_fs_start+0x332/0x690 fs/bcachefs/super.c:1041
 bch2_fs_get_tree+0x1118/0x1720 fs/bcachefs/fs.c:2203
 vfs_get_tree+0x93/0x340 fs/super.c:1814
 do_new_mount fs/namespace.c:3560 [inline]
 path_mount+0x6b2/0x1ea0 fs/namespace.c:3887
 do_mount fs/namespace.c:3900 [inline]
 __do_sys_mount fs/namespace.c:4111 [inline]
 __se_sys_mount fs/namespace.c:4088 [inline]
 __x64_sys_mount+0x27d/0x300 fs/namespace.c:4088
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xcb/0x250 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f1317a6cb3e
Code: 48 c7 c0 ff ff ff ff eb aa e8 fe 06 00 00 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 f3 0f 1e fa 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fffb6d639b8 EFLAGS: 00000282 ORIG_RAX: 00000000000000a5
RAX: ffffffffffffffda RBX: 0000555556e56378 RCX: 00007f1317a6cb3e
RDX: 0000000020011a00 RSI: 0000000020000000 RDI: 00007fffb6d639d0
RBP: 00007fffb6d639d0 R08: 00007fffb6d63a10 R09: 00000000000119ee
R10: 0000000000000000 R11: 0000000000000282 R12: 0000000000000000
R13: 00007fffb6d63a10 R14: 0000000000000003 R15: 0000000000000000
 </TASK>

The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x50387
flags: 0xfff00000000000(node=0|zone=1|lastcpupid=0x7ff)
raw: 00fff00000000000 0000000000000000 ffffea000140e1c8 0000000000000000
raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
page dumped because: kasan: bad access detected
page_owner tracks the page as freed
page last allocated via order 5, migratetype Reclaimable, gfp_mask 0x452cd0(GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_RECLAIMABLE), pid 9582, tgid 9582 (syz-executor129), ts 67557047425, free_ts 67641619968
 set_page_owner include/linux/page_owner.h:32 [inline]
 post_alloc_hook+0x193/0x1c0 mm/page_alloc.c:1551
 prep_new_page mm/page_alloc.c:1559 [inline]
 get_page_from_freelist+0xe4e/0x2b20 mm/page_alloc.c:3477
 __alloc_frozen_pages_noprof+0x219/0x21f0 mm/page_alloc.c:4740
 __alloc_pages_noprof+0xc/0x1b0 mm/page_alloc.c:4774
 __alloc_pages_node_noprof include/linux/gfp.h:265 [inline]
 alloc_pages_node_noprof include/linux/gfp.h:292 [inline]
 ___kmalloc_large_node+0x8b/0x1b0 mm/slub.c:4239
 __kmalloc_large_node_noprof+0x1a/0x70 mm/slub.c:4266
 __do_kmalloc_node mm/slub.c:4282 [inline]
 __kmalloc_node_noprof+0x38a/0x530 mm/slub.c:4300
 __kvmalloc_node_noprof+0x7e/0x1b0 mm/util.c:665
 btree_node_data_alloc.constprop.0+0xe9/0x2a0 fs/bcachefs/btree_cache.c:156
 __bch2_btree_node_mem_alloc+0x37/0xa0 fs/bcachefs/btree_cache.c:201
 bch2_fs_btree_cache_init+0x10c/0x550 fs/bcachefs/btree_cache.c:655
 bch2_fs_alloc+0x171d/0x2380 fs/bcachefs/super.c:919
 bch2_fs_open+0x91b/0x1100 fs/bcachefs/super.c:2065
 bch2_fs_get_tree+0x1041/0x1720 fs/bcachefs/fs.c:2190
 vfs_get_tree+0x93/0x340 fs/super.c:1814
 do_new_mount fs/namespace.c:3560 [inline]
 path_mount+0x6b2/0x1ea0 fs/namespace.c:3887
page last free pid 9582 tgid 9582 stack trace:
 reset_page_owner include/linux/page_owner.h:25 [inline]
 free_pages_prepare mm/page_alloc.c:1127 [inline]
 __free_pages_ok+0x618/0xe80 mm/page_alloc.c:1271
 __folio_put+0x321/0x440 mm/swap.c:112
 kvfree+0x46/0x50 mm/util.c:708
 btree_bounce_free fs/bcachefs/btree_io.c:111 [inline]
 bch2_btree_node_read_done+0x3ab2/0x4b80 fs/bcachefs/btree_io.c:1243
 btree_node_read_work+0x670/0x1050 fs/bcachefs/btree_io.c:1358
 bch2_btree_node_read+0x8ad/0xea0 fs/bcachefs/btree_io.c:1748
 __bch2_btree_root_read fs/bcachefs/btree_io.c:1789 [inline]
 bch2_btree_root_read+0x2c6/0x460 fs/bcachefs/btree_io.c:1811
 read_btree_roots fs/bcachefs/recovery.c:581 [inline]
 bch2_fs_recovery+0x222b/0x4340 fs/bcachefs/recovery.c:928
 bch2_fs_start+0x332/0x690 fs/bcachefs/super.c:1041
 bch2_fs_get_tree+0x1118/0x1720 fs/bcachefs/fs.c:2203
 vfs_get_tree+0x93/0x340 fs/super.c:1814
 do_new_mount fs/namespace.c:3560 [inline]
 path_mount+0x6b2/0x1ea0 fs/namespace.c:3887
 do_mount fs/namespace.c:3900 [inline]
 __do_sys_mount fs/namespace.c:4111 [inline]
 __se_sys_mount fs/namespace.c:4088 [inline]
 __x64_sys_mount+0x27d/0x300 fs/namespace.c:4088
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xcb/0x250 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f

Memory state around the buggy address:
 ffff888050386f00: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
 ffff888050386f80: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
>ffff888050387000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
                                                 ^
 ffff888050387080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
 ffff888050387100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
==================================================================
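
Reading the three stacks together, the report suggests the following sequence: the order-5 buffer was allocated as btree node data during bch2_fs_btree_cache_init(), it was freed through btree_bounce_free() on an earlier pass through bch2_btree_node_read_done(), and a later retry from btree_node_read_work() still handed the stale pointer to bch2_checksum(), where Poly1305 reads the freed pages. The sketch below is not the bcachefs code; it is a minimal, hypothetical userspace illustration of that alloc / free-on-error / reuse-on-retry pattern (names such as node_read_done() and verify_checksum() are invented), which AddressSanitizer flags at the same point KASAN does here.

/*
 * Hypothetical sketch of the use-after-free pattern implied by the stacks
 * above: an error path frees the node buffer, but a later retry still
 * checksums it. Not the bcachefs implementation.
 * Build: cc -fsanitize=address uaf_sketch.c
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct node_buf {
	uint8_t *data;   /* backing buffer (kvmalloc'd in the kernel case) */
	size_t   len;
};

/* Stand-in for the checksum pass: just reads every byte it is handed. */
static uint64_t verify_checksum(const uint8_t *p, size_t len)
{
	uint64_t sum = 0;
	for (size_t i = 0; i < len; i++)
		sum += p[i];
	return sum;
}

/*
 * Stand-in for the read-done path: on error it frees the buffer
 * (mirroring btree_bounce_free()) but leaves the stale pointer behind.
 */
static int node_read_done(struct node_buf *b, int simulate_error)
{
	if (simulate_error) {
		free(b->data);          /* buffer gone ...              */
		/* BUG: b->data is not cleared, so a retry can reuse it */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct node_buf b = { .data = malloc(4096), .len = 4096 };

	memset(b.data, 0xab, b.len);

	if (node_read_done(&b, /*simulate_error=*/1)) {
		/* Retry path: checksums the already-freed buffer; this read
		 * is where KASAN fires inside poly1305_update_arch(). */
		printf("csum=%llu\n",
		       (unsigned long long)verify_checksum(b.data, b.len));
	}
	return 0;
}

Built with -fsanitize=address, the retry read trips ASan on the first access to b.data inside verify_checksum(), the userspace analogue of the "page last free" stack above ending in btree_bounce_free().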