BUG: spinlock bad magic on CPU#1, jfsCommit/112
==================================================================
BUG: KASAN: slab-out-of-bounds in string_nocheck lib/vsprintf.c:653 [inline]
BUG: KASAN: slab-out-of-bounds in string+0x231/0x2b0 lib/vsprintf.c:735
Read of size 1 at addr ffff888075532520 by task jfsCommit/112

CPU: 1 UID: 0 PID: 112 Comm: jfsCommit Not tainted 6.16.0-next-20250731-syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
Call Trace:
 dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
 print_address_description mm/kasan/report.c:378 [inline]
 print_report+0xca/0x240 mm/kasan/report.c:482
 kasan_report+0x118/0x150 mm/kasan/report.c:595
 string_nocheck lib/vsprintf.c:653 [inline]
 string+0x231/0x2b0 lib/vsprintf.c:735
 vsnprintf+0x739/0xf00 lib/vsprintf.c:2926
 vprintk_store+0x3c7/0xd00 kernel/printk/printk.c:2279
 vprintk_emit+0x21e/0x7a0 kernel/printk/printk.c:2426
 _printk+0xcf/0x120 kernel/printk/printk.c:2475
 spin_dump+0x102/0x1a0 kernel/locking/spinlock_debug.c:64
 spin_bug kernel/locking/spinlock_debug.c:78 [inline]
 debug_spin_lock_before kernel/locking/spinlock_debug.c:86 [inline]
 do_raw_spin_lock+0x1ca/0x290 kernel/locking/spinlock_debug.c:115
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:111 [inline]
 _raw_spin_lock_irqsave+0xb3/0xf0 kernel/locking/spinlock.c:162
 __wake_up_common_lock+0x2f/0x1f0 kernel/sched/wait.c:124
 unlock_metapage fs/jfs/jfs_metapage.c:40 [inline]
 release_metapage+0x13c/0xac0 fs/jfs/jfs_metapage.c:871
 xtTruncate+0xe84/0x2e70 fs/jfs/jfs_xtree.c:-1
 jfs_free_zero_link+0x33a/0x4a0 fs/jfs/namei.c:759
 jfs_evict_inode+0x363/0x440 fs/jfs/inode.c:153
 evict+0x501/0x9c0 fs/inode.c:810
 txLazyCommit fs/jfs/jfs_txnmgr.c:2664 [inline]
 jfs_lazycommit+0x43f/0xa90 fs/jfs/jfs_txnmgr.c:2733
 kthread+0x70e/0x8a0 kernel/kthread.c:463
 ret_from_fork+0x3fc/0x770 arch/x86/kernel/process.c:148
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245

The buggy address belongs to the object at ffff8880755324e0
 which belongs to the cache jfs_ip of size 2232
The buggy address is located 64 bytes inside of
 allocated 2232-byte region [ffff8880755324e0, ffff888075532d98)

The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x75530
head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
memcg:ffff888076772c01
flags: 0xfff00000000040(head|node=0|zone=1|lastcpupid=0x7ff)
page_type: f5(slab)
raw: 00fff00000000040 ffff88801eb27b40 dead000000000122 0000000000000000
raw: 0000000000000000 00000000800d000d 00000000f5000000 ffff888076772c01
head: 00fff00000000040 ffff88801eb27b40 dead000000000122 0000000000000000
head: 0000000000000000 00000000800d000d 00000000f5000000 ffff888076772c01
head: 00fff00000000003 ffffea0001d54c01 00000000ffffffff 00000000ffffffff
head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
page dumped because: kasan: bad access detected
page_owner tracks the page as allocated
page last allocated via order 3, migratetype Reclaimable, gfp_mask 0xd2050(__GFP_RECLAIMABLE|__GFP_IO|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC), pid 8881, tgid 8880 (syz.9.528), ts 296097500722, free_ts 293166445222
 set_page_owner include/linux/page_owner.h:32 [inline]
 post_alloc_hook+0x240/0x2a0 mm/page_alloc.c:1851
 prep_new_page mm/page_alloc.c:1859 [inline]
 get_page_from_freelist+0x21e4/0x22c0 mm/page_alloc.c:3858
 __alloc_frozen_pages_noprof+0x181/0x370 mm/page_alloc.c:5148
 alloc_pages_mpol+0x232/0x4a0 mm/mempolicy.c:2416
 alloc_slab_page mm/slub.c:2487 [inline]
 allocate_slab+0x8a/0x370 mm/slub.c:2655
 new_slab mm/slub.c:2709 [inline]
 ___slab_alloc+0xbeb/0x1410 mm/slub.c:3891
 __slab_alloc mm/slub.c:3981 [inline]
 __slab_alloc_node mm/slub.c:4056 [inline]
 slab_alloc_node mm/slub.c:4217 [inline]
 kmem_cache_alloc_lru_noprof+0x288/0x3d0 mm/slub.c:4248
 jfs_alloc_inode+0x28/0x70 fs/jfs/super.c:105
 alloc_inode+0x6a/0x1b0 fs/inode.c:346
 new_inode+0x22/0x170 fs/inode.c:1145
 ialloc+0x4c/0x8f0 fs/jfs/jfs_inode.c:48
 jfs_create+0x18d/0xa80 fs/jfs/namei.c:92
 lookup_open fs/namei.c:3708 [inline]
 open_last_lookups fs/namei.c:3807 [inline]
 path_openat+0x14f1/0x3830 fs/namei.c:4043
 do_filp_open+0x1fa/0x410 fs/namei.c:4073
 do_sys_openat2+0x121/0x1c0 fs/open.c:1435
 do_sys_open fs/open.c:1450 [inline]
 __do_sys_open fs/open.c:1458 [inline]
 __se_sys_open fs/open.c:1454 [inline]
 __x64_sys_open+0x11e/0x150 fs/open.c:1454
page last free pid 5216 tgid 5216 stack trace:
 reset_page_owner include/linux/page_owner.h:25 [inline]
 free_pages_prepare mm/page_alloc.c:1395 [inline]
 __free_frozen_pages+0xbc4/0xd30 mm/page_alloc.c:2895
 __slab_free+0x303/0x3c0 mm/slub.c:4591
 qlink_free mm/kasan/quarantine.c:163 [inline]
 qlist_free_all+0x97/0x140 mm/kasan/quarantine.c:179
 kasan_quarantine_reduce+0x148/0x160 mm/kasan/quarantine.c:286
 __kasan_slab_alloc+0x22/0x80 mm/kasan/common.c:340
 kasan_slab_alloc include/linux/kasan.h:250 [inline]
 slab_post_alloc_hook mm/slub.c:4180 [inline]
 slab_alloc_node mm/slub.c:4229 [inline]
 __do_kmalloc_node mm/slub.c:4364 [inline]
 __kmalloc_noprof+0x224/0x4f0 mm/slub.c:4377
 kmalloc_noprof include/linux/slab.h:909 [inline]
 tomoyo_realpath_from_path+0xe3/0x5d0 security/tomoyo/realpath.c:251
 tomoyo_get_realpath security/tomoyo/file.c:151 [inline]
 tomoyo_path_perm+0x213/0x4b0 security/tomoyo/file.c:822
 security_inode_getattr+0x12f/0x330 security/security.c:2377
 vfs_getattr fs/stat.c:259 [inline]
 vfs_fstat fs/stat.c:281 [inline]
 __do_sys_newfstat fs/stat.c:555 [inline]
 __se_sys_newfstat fs/stat.c:550 [inline]
 __x64_sys_newfstat+0xfc/0x200 fs/stat.c:550
 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
 do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
 entry_SYSCALL_64_after_hwframe+0x77/0x7f

Memory state around the buggy address:
 ffff888075532400: 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc
 ffff888075532480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>ffff888075532500: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
                               ^
 ffff888075532580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 ffff888075532600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================