BUG: spinlock bad magic on CPU#1, jfsCommit/112
==================================================================
BUG: KASAN: slab-use-after-free in string_nocheck lib/vsprintf.c:645 [inline]
BUG: KASAN: slab-use-after-free in string+0x223/0x2b0 lib/vsprintf.c:727
Read of size 1 at addr ffff88805d56ef28 by task jfsCommit/112

CPU: 1 PID: 112 Comm: jfsCommit Not tainted syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/02/2025
Call Trace:
 dump_stack_lvl+0x16c/0x230 lib/dump_stack.c:106
 print_address_description mm/kasan/report.c:364 [inline]
 print_report+0xac/0x220 mm/kasan/report.c:468
 kasan_report+0x117/0x150 mm/kasan/report.c:581
 string_nocheck lib/vsprintf.c:645 [inline]
 string+0x223/0x2b0 lib/vsprintf.c:727
 vsnprintf+0xe52/0x1a40 lib/vsprintf.c:2823
 vprintk_store+0x3c7/0xc70 kernel/printk/printk.c:2226
 vprintk_emit+0x11f/0x600 kernel/printk/printk.c:2322
 _printk+0xd0/0x110 kernel/printk/printk.c:2366
 spin_dump+0x101/0x1a0 kernel/locking/spinlock_debug.c:63
 spin_bug kernel/locking/spinlock_debug.c:77 [inline]
 debug_spin_lock_before kernel/locking/spinlock_debug.c:85 [inline]
 do_raw_spin_lock+0x1c6/0x2c0 kernel/locking/spinlock_debug.c:114
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:111 [inline]
 _raw_spin_lock_irqsave+0xb4/0xf0 kernel/locking/spinlock.c:162
 __wake_up_common_lock kernel/sched/wait.c:137 [inline]
 __wake_up+0xf8/0x190 kernel/sched/wait.c:160
 unlock_metapage fs/jfs/jfs_metapage.c:38 [inline]
 release_metapage+0xc5/0x870 fs/jfs/jfs_metapage.c:765
 xtTruncate+0xe65/0x2dc0 fs/jfs/jfs_xtree.c:-1
 jfs_free_zero_link+0x33b/0x490 fs/jfs/namei.c:758
 jfs_evict_inode+0x35d/0x440 fs/jfs/inode.c:159
 evict+0x486/0x870 fs/inode.c:705
 txLazyCommit fs/jfs/jfs_txnmgr.c:2665 [inline]
 jfs_lazycommit+0x42b/0xa60 fs/jfs/jfs_txnmgr.c:2733
 kthread+0x2fa/0x390 kernel/kthread.c:388
 ret_from_fork+0x48/0x80 arch/x86/kernel/process.c:152
 ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:293

Allocated by task 7456:
 kasan_save_stack mm/kasan/common.c:45 [inline]
 kasan_set_track+0x4e/0x70 mm/kasan/common.c:52
 __kasan_slab_alloc+0x6c/0x80 mm/kasan/common.c:328
 kasan_slab_alloc include/linux/kasan.h:188 [inline]
 slab_post_alloc_hook+0x6e/0x4d0 mm/slab.h:767
 slab_alloc_node mm/slub.c:3495 [inline]
 slab_alloc mm/slub.c:3503 [inline]
 __kmem_cache_alloc_lru mm/slub.c:3510 [inline]
 kmem_cache_alloc_lru+0x115/0x2e0 mm/slub.c:3526
 alloc_inode_sb include/linux/fs.h:2946 [inline]
 jfs_alloc_inode+0x28/0x60 fs/jfs/super.c:105
 alloc_inode fs/inode.c:261 [inline]
 new_inode_pseudo+0x63/0x1d0 fs/inode.c:1049
 new_inode+0x22/0x1b0 fs/inode.c:1075
 ialloc+0x4c/0x950 fs/jfs/jfs_inode.c:48
 jfs_create+0x18b/0xa40 fs/jfs/namei.c:92
 lookup_open fs/namei.c:3496 [inline]
 open_last_lookups fs/namei.c:3564 [inline]
 path_openat+0x1277/0x3190 fs/namei.c:3794
 do_filp_open+0x1c5/0x3d0 fs/namei.c:3824
 do_sys_openat2+0x12c/0x1c0 fs/open.c:1421
 do_sys_open fs/open.c:1436 [inline]
 __do_sys_open fs/open.c:1444 [inline]
 __se_sys_open fs/open.c:1440 [inline]
 __x64_sys_open+0x11f/0x140 fs/open.c:1440
 do_syscall_x64 arch/x86/entry/common.c:51 [inline]
 do_syscall_64+0x55/0xb0 arch/x86/entry/common.c:81
 entry_SYSCALL_64_after_hwframe+0x68/0xd2

Freed by task 7479:
 kasan_save_stack mm/kasan/common.c:45 [inline]
 kasan_set_track+0x4e/0x70 mm/kasan/common.c:52
 kasan_save_free_info+0x2e/0x50 mm/kasan/generic.c:522
 ____kasan_slab_free+0x126/0x1e0 mm/kasan/common.c:236
 kasan_slab_free include/linux/kasan.h:164 [inline]
 slab_free_hook mm/slub.c:1811 [inline]
 slab_free_freelist_hook+0x130/0x1b0 mm/slub.c:1837
 slab_free mm/slub.c:3830 [inline]
 kmem_cache_free+0xf8/0x280 mm/slub.c:3852
 rcu_do_batch kernel/rcu/tree.c:2194 [inline]
 rcu_core+0xcc4/0x1720 kernel/rcu/tree.c:2467
 handle_softirqs+0x280/0x820 kernel/softirq.c:578
 __do_softirq kernel/softirq.c:612 [inline]
 invoke_softirq kernel/softirq.c:452 [inline]
 __irq_exit_rcu+0xc7/0x190 kernel/softirq.c:661
 irq_exit_rcu+0x9/0x20 kernel/softirq.c:673
 instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1088 [inline]
 sysvec_apic_timer_interrupt+0xa4/0xc0 arch/x86/kernel/apic/apic.c:1088
 asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:687

Last potentially related work creation:
 kasan_save_stack+0x3e/0x60 mm/kasan/common.c:45
 __kasan_record_aux_stack+0xaf/0xc0 mm/kasan/generic.c:492
 __call_rcu_common kernel/rcu/tree.c:2721 [inline]
 call_rcu+0x158/0x930 kernel/rcu/tree.c:2837
 destroy_inode fs/inode.c:316 [inline]
 evict+0x7db/0x870 fs/inode.c:720
 dispose_list fs/inode.c:738 [inline]
 evict_inodes+0x5fe/0x690 fs/inode.c:792
 generic_shutdown_super+0x97/0x2b0 fs/super.c:672
 kill_block_super+0x44/0x90 fs/super.c:1660
 deactivate_locked_super+0x97/0x100 fs/super.c:481
 cleanup_mnt+0x429/0x4c0 fs/namespace.c:1259
 task_work_run+0x1ce/0x250 kernel/task_work.c:239
 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline]
 exit_to_user_mode_loop+0xe6/0x110 kernel/entry/common.c:177
 exit_to_user_mode_prepare+0xf6/0x180 kernel/entry/common.c:210
 __syscall_exit_to_user_mode_work kernel/entry/common.c:291 [inline]
 syscall_exit_to_user_mode+0x1a/0x50 kernel/entry/common.c:302
 do_syscall_64+0x61/0xb0 arch/x86/entry/common.c:87
 entry_SYSCALL_64_after_hwframe+0x68/0xd2

Second to last potentially related work creation:
 kasan_save_stack+0x3e/0x60 mm/kasan/common.c:45
 __kasan_record_aux_stack+0xaf/0xc0 mm/kasan/generic.c:492
 __call_rcu_common kernel/rcu/tree.c:2721 [inline]
 call_rcu+0x158/0x930 kernel/rcu/tree.c:2837
 destroy_inode fs/inode.c:316 [inline]
 evict+0x7db/0x870 fs/inode.c:720
 dispose_list fs/inode.c:738 [inline]
 evict_inodes+0x5fe/0x690 fs/inode.c:792
 generic_shutdown_super+0x97/0x2b0 fs/super.c:672
 kill_block_super+0x44/0x90 fs/super.c:1660
 deactivate_locked_super+0x97/0x100 fs/super.c:481
 cleanup_mnt+0x429/0x4c0 fs/namespace.c:1259
 task_work_run+0x1ce/0x250 kernel/task_work.c:239
 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline]
 exit_to_user_mode_loop+0xe6/0x110 kernel/entry/common.c:177
 exit_to_user_mode_prepare+0xf6/0x180 kernel/entry/common.c:210
 __syscall_exit_to_user_mode_work kernel/entry/common.c:291 [inline]
 syscall_exit_to_user_mode+0x1a/0x50 kernel/entry/common.c:302
 do_syscall_64+0x61/0xb0 arch/x86/entry/common.c:87
 entry_SYSCALL_64_after_hwframe+0x68/0xd2

The buggy address belongs to the object at ffff88805d56ef00
 which belongs to the cache jfs_ip of size 2240
The buggy address is located 40 bytes inside of
 freed 2240-byte region [ffff88805d56ef00, ffff88805d56f7c0)

The buggy address belongs to the physical page:
page:ffffea0001755a00 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x5d568
head:ffffea0001755a00 order:3 entire_mapcount:0 nr_pages_mapped:0 pincount:0
memcg:ffff888022558201
anon flags: 0xfff00000000840(slab|head|node=0|zone=1|lastcpupid=0x7ff)
page_type: 0xffffffff()
raw: 00fff00000000840 ffff8880187b4280 0000000000000000 0000000000000001
raw: 0000000000000000 00000000800d000d 00000001ffffffff ffff888022558201
page dumped because: kasan: bad access detected
page_owner tracks the page as allocated
page last allocated via order 3, migratetype Reclaimable, gfp_mask 0x1d2050(__GFP_IO|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_RECLAIMABLE), pid 6082, tgid 6081 (syz.0.62), ts 121938394301, free_ts 120546728191
 set_page_owner include/linux/page_owner.h:31 [inline]
 post_alloc_hook+0x1cd/0x210 mm/page_alloc.c:1554
 prep_new_page mm/page_alloc.c:1561 [inline]
 get_page_from_freelist+0x195c/0x19f0 mm/page_alloc.c:3191
 __alloc_pages+0x1e3/0x460 mm/page_alloc.c:4457
 alloc_slab_page+0x5d/0x170 mm/slub.c:1881
 allocate_slab mm/slub.c:2028 [inline]
 new_slab+0x87/0x2e0 mm/slub.c:2081
 ___slab_alloc+0xc6d/0x1300 mm/slub.c:3253
 __slab_alloc mm/slub.c:3339 [inline]
 __slab_alloc_node mm/slub.c:3392 [inline]
 slab_alloc_node mm/slub.c:3485 [inline]
 slab_alloc mm/slub.c:3503 [inline]
 __kmem_cache_alloc_lru mm/slub.c:3510 [inline]
 kmem_cache_alloc_lru+0x1ae/0x2e0 mm/slub.c:3526
 alloc_inode_sb include/linux/fs.h:2946 [inline]
 jfs_alloc_inode+0x28/0x60 fs/jfs/super.c:105
 alloc_inode fs/inode.c:261 [inline]
 new_inode_pseudo+0x63/0x1d0 fs/inode.c:1049
 new_inode+0x22/0x1b0 fs/inode.c:1075
 jfs_fill_super+0x396/0xac0 fs/jfs/super.c:544
 mount_bdev+0x22b/0x2d0 fs/super.c:1643
 legacy_get_tree+0xea/0x180 fs/fs_context.c:662
 vfs_get_tree+0x8c/0x280 fs/super.c:1764
 do_new_mount+0x24b/0xa40 fs/namespace.c:3386
 do_mount fs/namespace.c:3726 [inline]
 __do_sys_mount fs/namespace.c:3935 [inline]
 __se_sys_mount+0x2da/0x3c0 fs/namespace.c:3912
page last free stack trace:
 reset_page_owner include/linux/page_owner.h:24 [inline]
 free_pages_prepare mm/page_alloc.c:1154 [inline]
 free_unref_page_prepare+0x7ce/0x8e0 mm/page_alloc.c:2336
 free_unref_page+0x32/0x2e0 mm/page_alloc.c:2429
 discard_slab mm/slub.c:2127 [inline]
 __unfreeze_partials+0x1cf/0x210 mm/slub.c:2667
 put_cpu_partial+0x17c/0x250 mm/slub.c:2743
 __slab_free+0x31d/0x410 mm/slub.c:3700
 qlink_free mm/kasan/quarantine.c:166 [inline]
 qlist_free_all+0x75/0xe0 mm/kasan/quarantine.c:185
 kasan_quarantine_reduce+0x143/0x160 mm/kasan/quarantine.c:292
 __kasan_slab_alloc+0x22/0x80 mm/kasan/common.c:305
 kasan_slab_alloc include/linux/kasan.h:188 [inline]
 slab_post_alloc_hook+0x6e/0x4d0 mm/slab.h:767
 slab_alloc_node mm/slub.c:3495 [inline]
 __kmem_cache_alloc_node+0x13e/0x260 mm/slub.c:3534
 kmalloc_node_trace+0x26/0xe0 mm/slab_common.c:1111
 kmalloc_node include/linux/slab.h:616 [inline]
 kzalloc_node include/linux/slab.h:732 [inline]
 __get_vm_area_node+0x125/0x370 mm/vmalloc.c:2613
 __vmalloc_node_range+0x36e/0x1320 mm/vmalloc.c:3299
 __vmalloc_node mm/vmalloc.c:3404 [inline]
 vzalloc+0x79/0x90 mm/vmalloc.c:3477
 alloc_counters+0xd0/0x690 net/ipv4/netfilter/ip_tables.c:799
 copy_entries_to_user net/ipv6/netfilter/ip6_tables.c:837 [inline]
 get_entries net/ipv6/netfilter/ip6_tables.c:1039 [inline]
 do_ip6t_get_ctl+0xa91/0x1150 net/ipv6/netfilter/ip6_tables.c:1677

Memory state around the buggy address:
 ffff88805d56ee00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 ffff88805d56ee80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>ffff88805d56ef00: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
                                  ^
 ffff88805d56ef80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
 ffff88805d56f000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
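Reading the stacks together: the jfs_ip object (the in-core JFS inode) was allocated by open() in task 7456, queued for RCU freeing when unmount evicted the inode (both call_rcu() stacks run evict_inodes() on the umount path), and actually freed from the RCU softirq logged against task 7479. Meanwhile the jfsCommit kthread was still committing a transaction against that inode: release_metapage() issued a wake-up on a wait-queue spinlock that spinlock debugging flagged as bad magic, and KASAN then caught spin_dump()'s printk reading a byte 40 bytes into the freed 2240-byte jfs_ip region. In short, inode teardown ran before the lazy-commit worker was done with the inode.

Below is a minimal userspace sketch of that lifetime pattern, assuming only what the trace shows; it is not JFS code. All names (struct obj, worker) are hypothetical, and a pthread mutex stands in for the embedded wait-queue spinlock.

/*
 * Hypothetical reduction of the bug pattern above: one thread frees an
 * object while another thread still takes a lock embedded in it.
 *
 * Build with: gcc -fsanitize=address -pthread uaf.c
 */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct obj {
	pthread_mutex_t lock;	/* stands in for the embedded wait-queue lock */
	int data;
};

static struct obj *shared;

/* Plays the role of the jfsCommit kthread: it finishes its work late
 * and takes the lock embedded in the shared object. */
static void *worker(void *arg)
{
	(void)arg;
	usleep(10000);				/* lose the race deliberately */
	pthread_mutex_lock(&shared->lock);	/* use-after-free happens here */
	shared->data++;
	pthread_mutex_unlock(&shared->lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	shared = malloc(sizeof(*shared));
	pthread_mutex_init(&shared->lock, NULL);
	shared->data = 0;

	pthread_create(&t, NULL, worker, NULL);

	/* Plays the role of the unmount/evict path: it tears the object
	 * down without waiting for the worker to finish with it. */
	pthread_mutex_destroy(&shared->lock);
	free(shared);

	pthread_join(t, NULL);
	return 0;
}

Run under AddressSanitizer, this reports a heap-use-after-free in worker(), the same shape as the KASAN report above. A kernel-side fix would accordingly need the eviction/unmount path to synchronize with the lazy-commit thread before the inode memory can be reclaimed; the sketch's free-before-join ordering is exactly what goes wrong.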