last executing test programs: 37.411453521s ago: executing program 0 (id=253): r0 = syz_usb_connect$hid(0x0, 0x36, &(0x7f0000000580)=ANY=[@ANYBLOB="12013f00000000407f04ffff000000000001090224000100000000090400001503000000092140000001220f00090581", @ANYRES16], 0x0) bpf$BPF_BTF_GET_FD_BY_ID(0x13, 0x0, 0x0) r1 = socket$inet6(0xa, 0x80002, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x8000, &(0x7f0000000280)={0xa, 0x4e20, 0x0, @remote, 0x7}, 0x1c) sendto$inet6(r1, &(0x7f00000009c0)="c7cfcaaa22e10542fca5c0195350f15147657e0bfc59d383a47190db88690e6fedc3040ab5809ae02a54cd429cc3338c5afa0c9dce3f91950d1f567f358ac21154159130e88cbb6c43197813b2f23f3e442f80877490b393408142ebcfea6821f543e5ee9e27032e2b75d78f1b79f5a6bb6f0645e267770ef7e8f3a92148091217450ce8581e54223eeb6486205a209bf1fe854d211c03f8c3140fc3979d824082990d119473d20e94f253c9621fac339560ae46cb24b88bf2d01559bb658e343257b90f233b81bc5c398be3bbddb23a1e", 0xffd6, 0xc001, 0x0, 0xffffffffffffff0c) setsockopt$inet6_mtu(r1, 0x29, 0x17, &(0x7f0000000040)=0x3, 0x4) setsockopt$inet6_udp_int(r1, 0x11, 0x1, &(0x7f0000000080), 0x4) syz_emit_ethernet(0x4a, &(0x7f0000000000)=ANY=[@ANYBLOB="aaaaaaaaaaaaaaaaaaaaaaaa86dd608a37f200082c00fe8000000000000000000000000000bbfe8000000000000000000000000000aa3a000005"], 0x0) syz_emit_ethernet(0x0, 0x0, 0x0) syz_usb_control_io(r0, 0x0, 0x0) syz_usb_control_io$hid(r0, &(0x7f0000000000)={0x24, 0x0, 0x0, &(0x7f0000000040)={0x0, 0x22, 0xf, {[@local=@item_4={0x3, 0x2, 0x0, "f896e404"}, @local=@item_012={0x1, 0x2, 0x0, 'e'}, @main=@item_012={0x2, 0x0, 0x0, "f792"}, @main=@item_4={0x3, 0x0, 0x0, "9ef12d19"}]}}, 0x0}, 0x0) r2 = syz_open_dev$hiddev(&(0x7f0000000080), 0x0, 0x0) ioctl$HIDIOCGUSAGE(r2, 0x5452, &(0x7f00000000c0)={0x2}) 34.815582752s ago: executing program 1 (id=257): r0 = socket$xdp(0x2c, 0x3, 0x0) setsockopt$XDP_UMEM_REG(r0, 0x11b, 0x4, &(0x7f0000000040)={&(0x7f0000000000)=""/5, 0x1c000, 0x800}, 0x20) setsockopt$XDP_TX_RING(r0, 0x11b, 0x3, &(0x7f00000003c0)=0x800, 0x4) socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) ioctl$sock_SIOCGIFINDEX(r1, 0x8933, &(0x7f0000000000)={'lo\x00', 0x0}) setsockopt$XDP_UMEM_FILL_RING(r0, 0x11b, 0x5, &(0x7f00000000c0)=0x40, 0x4) setsockopt$XDP_UMEM_COMPLETION_RING(r0, 0x11b, 0x6, &(0x7f0000000280)=0x20, 0x4) bind$xdp(r0, &(0x7f00000001c0)={0x2c, 0x0, r2}, 0x2a) r3 = socket$xdp(0x2c, 0x3, 0x0) setsockopt$XDP_TX_RING(r3, 0x11b, 0x3, &(0x7f0000000240)=0x800, 0x4) bind$xdp(r3, &(0x7f0000000100)={0x2c, 0x1, r2, 0x0, r0}, 0x10) mmap$xdp(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x13, r3, 0x100000000) 34.577122305s ago: executing program 1 (id=258): r0 = socket$pppl2tp(0x18, 0x1, 0x1) r1 = socket$inet6_udp(0xa, 0x2, 0x0) connect$pppl2tp(r0, &(0x7f0000000040)=@pppol2tpv3={0x18, 0x1, {0x3, r1, {0x2, 0x0, @multicast2}, 0x2}}, 0x2e) r2 = socket$pppl2tp(0x18, 0x1, 0x1) connect$pppl2tp(r2, &(0x7f0000000080)=@pppol2tpv3={0x18, 0x1, {0x3, 0xffffffffffffffff, {0x2, 0x0, @multicast2}, 0x2, 0xfffffffd}}, 0x2e) bpf$PROG_LOAD(0x5, &(0x7f0000000180)={0x1, 0x0, 0x0, 0x0}, 0x90) r3 = openat$ppp(0xffffffffffffff9c, &(0x7f0000000040), 0x1a01, 0x0) ioctl$EVIOCGPROP(r3, 0x40047438, &(0x7f0000000180)=""/246) openat$fuse(0xffffffffffffff9c, &(0x7f0000002080), 0x42, 0x0) openat$iommufd(0xffffffffffffff9c, &(0x7f0000000080), 0x0, 0x0) syz_usb_connect(0x0, 0x3f, &(0x7f0000000080)=ANY=[], 0x0) close_range(0xffffffffffffffff, 0xffffffffffffffff, 0x2) pselect6(0x40, &(0x7f00000001c0)={0x0, 0x0, 0x0, 0x10, 0x0, 0x26fb3ca1}, 0x0, &(0x7f0000000280)={0x3ff, 0x0, 0xfecf, 0x0, 0xfffffffffffffffe, 0x6}, 
&(0x7f0000002000)={0x0, 0x3938700}, 0x0) 34.200361145s ago: executing program 0 (id=260): r0 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) ioctl$sock_bt_hci(r0, 0x800448f0, 0x0) socket$packet(0x11, 0x0, 0x300) syz_mount_image$ext4(&(0x7f00000000c0)='ext4\x00', &(0x7f0000000080)='./file0\x00', 0x4, &(0x7f0000000000), 0x1, 0x625, &(0x7f0000001400)="$eJzs3c9rXNUeAPDvncnvvPeShsd7r2/xXkC0BW3SpK0UEWwRXJVSfywEN45NWmqnP2gimlppAnUjiBsXgisX1oX/gxYEV/4DLty4kkoR6UYpOnInd8ZpMjeZxMxMm/l8YDr33HNzz7nJfHvOnDnnTgA9azL9pxCxNyIuJxETDXl9kWVOrh539+drp9NHEpXKiz8lce16stx4riR7Ho2INOO3sUi+joiJ4vpyF5auni+Vy/NXsvT04oXL0wtLVw+cu1A6O392/uLsk7NHjxw+cnTmYMNPnXluq9e3p2H7xI3X3xx77+Qrn358L5n57LuTSRyrVzq9rq2eezOTMRmVTOP+9Pd6dKcL65Ji/XXyp2TtjrWebWOF2JLa368/Iv4dY1Fs+GuOxbvPd7VyQFtVkqi3UUCvScQ/9KhaP6D23r6198EDbe6VAJ1w53jEY/X474+I1fg/NpuNDcZQdWxg5G5y3zhPEhEHd6D8tIxvvjp5I31Em8bhgOaWVwazIfC17X9Sjc3xGKqmRu4W7ov/QjaMO56NH76wcTFjeRmTa9JZ+YPbvR6gdcsrEfGfZv3/zeP/1ew53f/aNsvPiX8AAAAAAABgG24dj4gnmn3+V6jP/xloMv9nNCKO7UD5m3/+V7i9A8UATdw5HvF00/m/hdoh48Us9ffqfID+5My58vzBiPhHROyP/sE0PdN40s8bfjoiDrw/8VFe+Y3z/9JHWn5tLmBWj9t9a2YDzZUWSztw6dDz7qxE/Lcvf/5P2v4nTdr/NL4vt1jGxKM3T+XlbR7/QLtUPonY17T9T+rHJBvfn2O62h+YrvUK1vvf2x98kVe++IfuSdv/kY3jfzBpvF/PwtbOPxARh5b6Knn52+3/DyQvFaNhJeJbpcXFKzMRA8mJ9ftnt1ZneJgMtX7oOxFRjYdavKTxv/+Rjcf/6v3/hjgczu7x1Yp//T76fV6e9h+6J43/uY3b//H72/+tb8zeHP8yr/xTLbX/h6tt+v5sj/E/2FirAdrtegIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAw6gQEX+LpDBV3y4UpqYiRiPinzFSKF9aWHz8zKU3Ls6ledXv/y/Uvul3bDWd1L7/f7whPbsmfSgi9kTEh8Xhanrq9KXyXLcvHgAAAAAAAAAAAAAAAAAAAB4Qo6tP69b/p34odrVqQCf0Zc/iHXpPX7crAHSN+IfeJf6hd+XH/y/3KlUdrQ7QQa22/5Xrba4I0HHb7P/7uAB2Ae//oVf1t3bYULvrAXSD9h8AAAAAAHaVPf+/9W0SEctPDVcfqYEsr/7B4HC3age0UyEvY7Cz9QA6zxxe6F2m/kDvanHyL7CLJfWtX5su9s+f/Z+0p0IAAAAAAAAAAAAAwDr79raw/h/YlXLX/wO73gbr/5st7HG7ANhFrP+H3uU2X0Cts5/3Tf/W/wMAAAAAAAAAAADAA2Do6vlSuTx/ZWGp5Y3rWzn4r2/8GHlZz9Q2VjpQjUpxZ86zXOrcr+4h3+iPiDVZlbHVl+35Uvnl6Gx9ahHTibIGOlhWzkaX/j8CAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADW+SMAAP//fSQouA==") r1 = bpf$MAP_CREATE(0x0, &(0x7f00000000c0)=@base={0x1b, 0x0, 0x0, 0x8000}, 0x48) bpf$PROG_LOAD(0x5, &(0x7f00000000c0)={0x0, 0xc, &(0x7f0000000240)=ANY=[@ANYBLOB="1800000000000000000000000000000018120000", @ANYRES32=r1, @ANYBLOB="0000000000000000b7080000000000007b8af8ff00000000bfa200000000000007020000f8ffffffb703000008000000b704000002010010850000004300000095"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x90) r2 = bpf$PROG_LOAD(0x5, &(0x7f00000000c0)={0x11, 0xc, &(0x7f0000000240)=ANY=[], &(0x7f0000000200)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x90) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000400)={&(0x7f0000000540)='sock_rcvqueue_full\x00', r2}, 0x10) r3 = openat$ptmx(0xffffffffffffff9c, &(0x7f0000000000), 0x0, 0x0) ioctl$TIOCSETD(r3, 0x5423, &(0x7f00000000c0)=0xf) ioctl$TCFLSH(r3, 0x400455c8, 0x0) r4 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) setsockopt$sock_int(r4, 0x1, 0x8, &(0x7f0000000080), 
0xfd32) bind$bt_hci(r4, &(0x7f0000000040)={0x1f, 0xffffffffffffffff, 0x2}, 0x6) 33.780006962s ago: executing program 1 (id=262): syz_mount_image$jfs(&(0x7f0000000140), &(0x7f00000000c0)='./file0\x00', 0x804000, &(0x7f0000000a00)=ANY=[@ANYRESOCT=0x0, @ANYRESHEX=0x0, @ANYBLOB="2c726573697a653d3078303030303030303030303030303030332c6e6f696e746567726974792c696f636861727365743d61776369692c75737271756f74612c6673636f6e746578743d73746166665f752c7063723d30303030303030303030303030303030303030342c666f776e65723ec2345dea8f76444c08f713d549cdf94d6b94cb9d6acc68bd8379e863f8451f62f4cb649865592694d05c606fb8300a4657c379a603d660a8f37697c17e9d71a24c721bc28084b51e1ad1", @ANYRESDEC=0x0, @ANYBLOB="c4172812275f94fddc043bc079bc77fbcbd5a6fae6f4b638000b3733f61e8ea49e71e6842055dc8bea4c8ea12e2a9d59a67e1b4c5f2248ba1fd3719be18c21789c2c7e7201b200ccf440260d70f50db05a36a52167c81e747043cd9b5b58193328444b12519b539d8fb5f946f90956c5549788fe0c4ac1851e6ba9740861e6c4a5b7078cf5466f087f93d66a795769e63a67c1df3e69a98e651c8d924cb434c405ee3fd9679187f08bfc631987da61345f67ab6c70ace0e4841d6a12c211d332d2568ae733c7bd49a1d9ce8d691ee76a53cb616ba030800c5f33feb353990169f2a3084fbe2b5d9a9bdd69f9dafad3d37d0be2d1a3b39b0a34d563a5890d8112168a00063028806d01bf03a162413a8772f192c67c39346a7088ff6d2ef3806471ce640368ff1c249aacc0220132fe74d271ad8be490a21238812c2dfe227487bac572f2bc0f13a3901665e73172c73177ddf621ad73b0a4d721856741f27541716dbe13c6af48350f5485ee7e61e1d866b4b9db907488c1b10273c7e6b01a7ccb7460c0cf81491ac544b41a3709597db74a9bb1409b7acced5564cf294be19d59a6579dcd52f66f1f9b3336bf7c6c4eb2cd75be3cc278c4f2e88afeac2b68f8100b0d728a227d76388fea62decc"], 0xfe, 0x6166, &(0x7f0000001300)="$eJzs3c1vHGcdB/DfvvqltLV6qEqEkJuWl1KaxEkJgQJtD3Dg0gPKFSVy3SoiBZQElFYWceULB078BSAkjghxRBz4A3rgyo0TJyLZSKCeGDT288TjzW7t1PHO2s/nIzkzv3lmvc/4u7MvmZl9AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACI73/vByudiLj287RgKeIz0YvoRizU9XJELCwv5fX7EfFc7DTHsxExmIuob7/zz9MRr0bER09FbG2vr9aLLx6yH9/9499/98Mn3vrbHwbn//unO73XJq139+6v/vPne0fbZgAAAChNVVVVJ33MP5M+33fb7hQAMBX59b9K8vJTX//6n2/9ZZb6o1ar1Wr1FOqmarx7zSIiNpq3qd8zOBwPACfMRnzcdhdokfyL1o+IJ9ruBDDTOm13gGOxtb2+2kn5dpqvB8u77flckH35b3QeXN8xaXqQ0XNMpvX42oxePDOhPwtT6sMsyfl3R/O/tts+TOsdd/7TMin/4e6lT8XJ+fdG8x9xevLvjs2/VDn//iPl35M/AAAAAADMsPz//0stH/+dO/qmHMonHf9dnlIfAAAAAAAAAOBxO+r4fw8Y/w8AAABmVv1Zvfabp/aWTfoutnr51U7EkyPrA4VJF8sstt0PAAAAAAAAAAAAAChJf/cc3qudiEFEPLm4WFVV/dM0Wj+qo97+pCt9+6FkbT/JAwDAro+eGrmWvxMxHxFX03f9DRYXF6tqfmGxWqwW5vL72eHcfLXQ+Fybp/WyueEh3hD3h1X9y+Ybt2s66PPyQe2jv6++r2HVO0THjqYfaWaQ/poT1mspbABIdl+NtrwinTJV9fSkNx+wj/3/FFqKpbYfV8y+th+mAAAAwPGrqqrqpK/zPpOO+Xfb7hQAMBX59X/0uMCR6u6E9ojH8/vVarVarVZ/qrqpGu9es4iIjeZt6vcMhuMHgBNmIz5uuwu0SP5F60fEc213AphpnbY7wLHY2l5f7aR8O83XgzS+ez4XZF/+G52d2+Xbj5seZPQck2k9vjajF89M6M+zU+rDLMn5d0fzv7bbPkzrHXf+0zIp/+HOJXPlyfn3RvMfcXry747Nv1Q5//4j5d+TPwAAAAAAzLD8//9Ljv/mTQYAAAAAAACAE2dre301X/eaj/9/bsx6rv88nXL+nUfNfyHNy/9Ey/l3R/L/8sh6vcb8/Tf39v9/b6+v/v7Ovz6bp4fNfy7PdNIjq5MeEZ10T51+mh5l6x62OegN63sadLq9fjrnpxq8EzfiZqzFhX3rdtPfY699ZV973dPBvvaL+9r7D7Vf2tc+SN87UC3k9nOxGj+Jm/H2TnvdNnfA9s8f0F4d0J7z73n+L1LOv9/4qfNfTO2dkWnt/ofdh/b75nTc/bxx4/O/vHD8m3Ogzeg92LamevvOttCfnb/JE8P42e21W+fuXr9z59ZKpMm+pRcjTR6znP9g52du7/n/hd32/Lzf3F/vfzh85PxnxWb0J+b/QmO+3t6Xpty3NuT8h+kn5/92ah+//5/k/Cfv/y+30B8AAAAAAAAAAAAAAAD4JFVV7Vwi+kZEXE7X/7R1bSYAMF359b9K8nK1Wq1Wq9Wnr26qxnu9WUTEX5u3qd8z/GLcLwMAZtn/IuIfbXeC1si/YPn7/urpi213Bpiq2+9/8KPrN2+u3brddk8AAAAAAAAAgE8rj/+53Bj/+cWIWBpZb9/4r2/G8lHH/+znmQcDjD7mgb4n2OwOe93GcOPPx8743Ocmjf99Nh4e/zuPidtrbscEgwPahwe0zx3QPj926V5aYy/0aMj5P98Y77zO/8zI8OsljP86OuZ9CXL+ZxuP5zr/L42s18y/+u3M5b9x2BU3o7sv//N33vvp+dvvf/DKjfeuv7v27tqPL62sXLh0
+fKVK1fOv3Pj5tqF3X+Pp9czIOefx752HmhZcv45c/mXJef/hVTLvyw5/y+mWv5lyfnn93vyL0vOP3/2kX9Zcv4vpVr+Zcn5fyXV8i/L1vb6XJ3/y6mWf1ny/v/VVMu/LDn/V1It/7Lk/M+lWv5lyfmfT/Uh8vf18KdIzj8f4bL/lyXnv5Jq+Zcl538x1fIvS87/UqrlX5ac/6upln9Zcv5fS7X8y5Lzv5xq+Zcl5//1VMu/LDn/K6mWf1ly/t9ItfzLkvP/ZqrlX5ac/2upln9Zcv7fSrX8y5Lz/3aq5V+WnP93Ui3/suT8X0+1/Muy9/3/ZsyYMZNn2n5mAgAAAAAAAAAAAABGTeN04ra3EQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD+zw4cCAAAAAAA+b82QlVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVRV24EAAAAAAAMj/tRGqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsHdvMXLd9R3Az+zFXjuEGAjBSQ1sEhOMs2TXl/hC62LCtQFKgYRCL9iud20WfMNrl0CRbBookTAqqqiaPrQFhNpIVYVV8UArSvNQ9fJU2gf6UlFVQmpUBRRQkdqKZquZ8///PTM7OzPrHa9nz//zkezf7syZOWfOnJnd79rfPQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQ7O43zn22VhRF/U/jry1F8YL6x5smtzQue93N3kIAAABgtf6v8fdzt6ULDvdxo6Zl/u4V//j1xcXFxeL9o787/sXFxXTFZFGMbyyKxnXR1X//QK15meDxYqI20vT5SI/Vj/a4fqzH9eM9rt/Q4/qNPa6f6HH9kh2wxKby5zGNO9ve+HBLuUuL24vxxnXbO9zq8drGkZH4s5yGWuM2i+MnivniVDFXzLQsXy5bayz/zbvr63pbEdc10rSubfUj5IefPB63oRb28faWdV27z+j7bygmf/TDTx7/4wvP3tlp9twNLfdXbueOe+rb+elwSbmttWJj2idxO0eatnNbh+dktGU7a43b1T9u387n+tzO0Wubuaban/OJYqTx8bcb+2ms+cd6aT9tC5f9971FUVy+ttntyyxZVzFSbG65ZOTa8zNRHpH1+6gfSi8uxlZ0nN7dx3Fan7PbW4/T9tdEfP7vDrcbW2Ybmp+m739qw5LnfaXHaVR/1Mu9VtqPwUG/VoblGIzHxbcbD/qJjsfg9vD4P3nf8sdgx2OnwzGYHnfTMXhPr2NwZMNoY5vTk1Br3ObaMbirZfnRxppqjfnMfd2PwekLp89NL3z8E6+dP33s5NzJuTN7du2a2bNv34EDB6ZPzJ+amyn/vs69Pfw2FyPpNXBP2H
fxNfDqtmWbD9XFLw/udTjR5XW4pW3ZQb8Ox9ofXG1tXpBLj+nytfFIfadPXBkplnmNNZ6fnat/HabH3fQ6HGt6HXb8mtLhdTjWx+uwvsy5nf19zzLW9KfTNtyorwVbmo7B9u9H2o/BQX8/MizH4EQ4Lv515/JfC7aF7X1iaqXfj4wuOQbTww3vPfVL0vf7Ewcao9NxeVf9ils2FBcX5s4/8NixCxfO7yrCWBMvaTpW2o/XzU2PqVhyvI6s+Hg9PP+KJ+7qcPmWsK8mXlv/a2LZ56q+zN4Huj9Xja9unfdny6W7izAGbK33Z6ev5vX9mbJkl/1ZX+bT06v/Xjzl0qb33/Fl3n9j7n++XF+6q8dHx8fK1+9o2jvjLe/HrU/VWOO9q9ZY93PT/b0fj4c/a/1+fHuX9+OtbcsO+v14vP3BxffjWq+fdqxO+/M5EY6TUzPd34/ry2zdvdJjcqzr+/G9YdbC/n9NSAopFzUdO8sdt2ldY2Pj4XGNxTW0Hqd7WpYfD9msvq6ndl/fcbrj3vK+RtOju2atjtPJtmUHfZym96vljtNar5++XZ/253MiHBe37+l+nNaXeXrv6t87N8UPm947N/Q6BsdHN9S3eTwdhOX7/eKmeAw+UBwvzhanitnGtRsax1Otsa6pB/s7BjeEP2v9Xrm1yzG4o23ZQR+D6evYcsdebWzpgx+A9udzIhwXTz7Y/RisL/Om/YP93nVHuCQt0/S9a/vP15b7mdddbbvpRv7Mq76df7O/+89m68ucOrDSnNl9P90fLrmlw35qf/0u95qaLdZmP20N2/nsgeX3U3176st88WCfx9PhoiguffShxs97w7+v/PnF73y95d9dOv2bzqWPPvSDW0/87Uq2H4D17/lybC6/1jX9y1Q///4PAAAArAsx94+Emcj/AAAAUBkx98f/FZ7I/wAAAFAZMfePhZlkkv+3vunZ+ecvFamZvxjE69NueLhcLnZcZ8Lnk4vX1C9/6KtzP/7LS/2te6Qoip88/Bsdl9/6cNyu0mTYzqtvbr186Q0v9bX+o49eW665v/6lcP/x8fR7GHSq4M4URfHN2z7fWM/kB6405tMPH23M91x+4vH6Ms8dLD+Pt3/mJeXyfxDKv4dPHGu5/TNhP3wvzJm3d94f8XZfu/Kabfvfd2198Xa1e17YeNhPfrC83/h7cr7weLl83M/Lbf9ffe6pr9WXf+xVnbf/0kjn7X8q3O9Xw/yfl5fLNz8H9c/j7T4Ttj+uL97uga98q+P2X/1sufy5t5TLHQ0zrn9H+Hz7W56db95fj9WOtTyu4q3lcnH9M9/57cb18f7i/bdv/8SRKy37o/34ePqfy/uZbls+Xh7XE/1F2/rr99N8fMb1P/VbR1v2c6/1X33PMy+v32/7+u9vW2607fbtv7HpDz/z+Y7ri9tz+M/OtTyew+8Or+Ow/ic/GI7HcP3/Xv18y3qjo+9uff+Jy39py6WWxxO97Ufl+q++/mRj/sfkj3//lhfc+sLLr6zvu6L49nvL++u1/pN/dLZl+798x87G8xGvjx399vUvJ67//MemzpxduDg/27RXG7875x3l9myc2LS5vr23hffW9s+PnL3wobnzkzOTM0UxWd1foXfdvhLmD8pxeaW33/loeD7v+r1vbr7vnz4XL/+XR8rLr7y9/Lr16rDcF8LlW8rnb7G2yvU/efcdjdd37eny85Ye+wBs2/6fB/paMDz+9u8L4vF+7qUfauyH+nWNrxvxdb3K7f/ubHk/3wj7dTH8ZuZ77ri2vubl4+9GuPLe8vW+6v0X3ubi8/on4fl+5/fK+4/bFR/vd8P3Md/a2vp+F4+Pb1waab//xm/xuBzeT4rL5fVxqbi/rzx3R8fNi7+HpLh8Z+Pz30n3c+eKHuZyFj6+MH1q/szFx6YvzC1cmF74+CeOnD578cyFI43f5Xnkw71uf+39aXPj/Wl2bt/eYmZTURRni5k1eMO6Mdtf/6i/7T/36PHZ/TP3zc6dOHbxxIVHz82dP3l8YeH43OzCfcdOnJj7WK/bz88e2rX74J79u6dOzs8eOnDw4J6DU/NnztY3o9yoHvbNfGTqzPkjjZssHNp7cNeDD+6dmTp9dnbu0P6ZmamLvW7f+No0Vb/1r0+dnzt17ML86bmphflPzB3adXDfvt09fxvg6XMnFianz188M31xYe78dPlYJi80Lq5/7et1e6pp4d/K72fb1cpfxFe86/596fez1n31U8veVblI2y8QfTb8Lpp/eNG5A/18HnP/eJhJJvkfAAAAchBz/4YwE/kfAAAAKiPm/o1hJvI/AAAAVEbM/RNhJpnkf/1//f/++v/l9fr/efX/z3207JWu9/5/7M/r/+fhJvf/V71+/X/9/+r1//vvz6/37df/1/9nqWHr/8fcv6kossz/AAAAkIOY+zeHmcj/AAAAUBkx998SZiL/AwAAQGXE3P+CMJNM8r/+f1/9/929ClfV7/87/7/+f7E++//xydH/z8aK+/fve6TlU/3/QP9f/1//X/9f/59VG1/2mpvV/4+5/9Ywk0zyPwAAAOQg5v4XhpnI/wAAAFAZMfffFmYi/wMAAEBlxNy/Jcwkk/yv/+/8//r/+v+V7v+v9vz/TRuj/78+OP9/d/r/PVx3/39C/3899v/HB7v9w93/77n5+v/cEMN2/v+Y+18UZpJJ/gcAAIAcxNz/4jAT+R8AAAAqI+b+l4SZyP8AAABQGTH33x5mkkn+1//X/9f/1//X/++8/t7n/y8/0v8fLvr/3en/9+D8/3n1/we8/cPd/x/0+f/H39x+e/1/Ohm2/n/M/S8NM8kk/wMAAEAOYu6/I8xE/gcAAIDKiLn/ZWEm8j8AAABURsz9W8NMMsn/+v/6//r/+v/6/53X37v/X9L/Hy76/93p//eg/6//r//fX/+/wze/+v90Mmz9/5j77wwzyST/AwAAQA5i7r8rzET+BwAAgMqIuf+nwkzkfwAAAKiMmPu3hZlkkv/1//X/9f/z6v/fv0H/X/+/2vT/u9P/70H/X/9f/7/P8/8vtZL+/8Zed0ZlDFv/P+b+l4eZZJL/AQAAIAcx978izET+BwAAgMqIuf+VYSbyPwAAAFRGzP2TYSaZ5H/9/2r1///0r598ZaH/r//fY/0V7f/Hw0D/P3P6/93p//eg/6//r/+/Jv1/8jFs/f+Y++8OM8kk/wMAAEAOYu6/J8xE/gcAAIDKiLn/3jAT+R8AAAAqI+b+7WEmmeR//f9q9f8j/X/9/27rr2j/P9H/z5v+fwdNL1L9/x70//X/s+//x+9+9f8ZjGHr/8fc/6owk0zyPwAAAOQg5v77wkzkfwAAAKiMmPtfHWYi/wMAAEBlxNy/I8wkk/yv/6//r/+v/6//33n9+v/rk/5/dyvt/2/Q/9f/1//PrP/v/P8M1rD1/2Puf02YSSb5HwAAAHIQc//OMBP5HwAAACoj/v/N8v+9yv8AAABQRTH3T4WZZJL/9f/1/3Pq/9f0//X/9f8rT/+/O+f/70H/X/9f/1//n4Eatv5/zP2vDTPJJP8DAABADmLuf
yDMRP4HAACAyoi5fzrMRP4HAACAyoi5fybMJJP8r/+v/59T/9/5//X/9f+rT/+/O/3/HvT/9f+r1v8vCv1/bqph6//H3L8rzCST/A8AAAA5iLl/d5iJ/A8AAACVEXP/njAT+R8AAAAqI+b+vWEmmeR//X/9f/1//X/9/87r1/9fn/T/u9P/70H/X/+/av1/5//nJhu2/n/M/Q+GmWSS/wEAACAHMffvCzOR/wEAAKAyYu7fH2YS8n+n/9cNAAAArC8x9x8IM8nk3//1/yvS///Nv29Zt/6//n+39Q+m/79J/z9M/f/hUtH+f/vL4rrp//eg/6//r/+v/89ADVv/P+b+g2EmmeR/AAAAyEHM/a8LM5H/AQAAoDJi7v/pMBP5HwAAACoj5v6fCTPJJP/r/1ek/99G/1//v9v6nf9f/7/KKtr/H5hK9f9H9P/1/4dr+/X/9f9Z6sb3/+NH/fX/Y+4/FGaSSf4HAACAHMTc/7NhJvI/AAAAVEbM/a8PM5H/AQAAoDJi7j8cZpJJ/tf/1//X/9f/vzH9/9cX7Yax/18/ePT/q0X/v7tK9f+d/1//f8i2X/9f/5+lhu38/zH3vyHMJJP8DwAAADmIuf+hMBP5HwAAACoj5v43hpnI/wAAAFAZMfe/Kcwkk/yv/6//r/+v/+/8/53Xr/+/PuXb/+9vVfr/Pej/6//r/+v/M1DD1v+Puf/NYSaZ5H8AAADIQcz9bwkzkf8BAACgMmLuf2uYifwPAAAAlRFz/9vCTDLJ//r/+v/6//r/+v+d16//vz7l2//vj/5/D/r/+v/6//r/DNSw9f9j7v+5MJNM8j8AAADkIOb+h8NM5H8AAACojJj73x5mIv8DAABAZcTc/44wk0zyv/6//r/+v/6//n/n9ev/r0/6/93p//eg/6//r/+v/89ADVv/P+b+d4aZZJL/AQAAIAcx9/98mIn8DwAAAJURc/+7wkzkfwAAAKiMmPt/Icwkk/yv/6//P1z9/8VLzbfT/9f/LwbV/6/fSP8/C/r/3en/99Ch/79R/1//X/9f/5/rNmz9/5j73x1mkkn+BwAAgBzE3P+eMBP5HwAAACoj5v73hpnI/wAAAFAZMfc/EmaSSf7X/x/u/v9oOB/+gPv/6SEPX//f+f/1/53/X/9/dfT/u9P/78H5//X/9f/1/xmoYev/x9z/aJhJJvkfAAAAchBz//vCTOR/AAAAqIyY+38xzET+BwAAgMqIuf/9YSaZ5H/9/+Hu/+d3/v+q9f/HWo6PnPr/E03PZzou9f/1/9eA/n93+v896P/r/w9z/z8czZuWub3+P8No2Pr/Mfd/IMwkk/wPAAAAOYi5/5fCTOR/AAAAqIyY+385zET+BwAAgMqIuf9Xwkwyyf/6//r/+v/O/+/8/53Xr/+/Pun/d6f/34P+v/7/MPf/e9D/ZxgNW/8/5v5fDTNZNvj94L/6eJgAAADAEIm5/4NhJpn8+z8AAADkIOb+I2Em8j8AAABURsz9R8NMMsn/+v/t/f94RlX9f/1//X/9f/3/9Whw/f+X3VoU+v/6//r/+v/6//r/rMaw9f9j7j8WZpJJ/gcAAIAcxNz/a2Em8j8AAABURsz9x8NM5H8AAACojJj7Z8NMMsn/N7H/Pz6c/X/n/7/e/v9P9P/1/wP9/870/9eG8/93p//fg/6//n/9CdD/1/9nYIat/x9z/1yYSSb5HwAAACos/Tg45v4TYSbyPwAAAFRGzP0nw0zkfwAAAKiMmPs/FGaSSf53/n/9f+f/vxn9/7GW5fX/S/r/+v+DoP/fnf5/D/r/+v/O/6//z0ANW/8/5v75MJNM8j8AAADkIOb+D4eZyP8AAABQGTH3fyTMRP4HAACAyoi5/1SYSSb5X/9f/z/3/n+tKC47/7/+f6f16/+vT/r/3en/95BZ/3+s7fOb3Z9frZu9/fr/+v8sNWz9/5j7T4eZZJL/AQAAIAcx958JM5H/AQAAoDL+n737aJLsLPY4XFdXGrO69yOwZsUSVuIjsGVHBGsCJ7yRhPcgvDfCe++d8N57L7y3wgoihlBPZs50d82pMTVd57z5PJtkJtRUtVSS+NPxi5O7/55xi/0PAAAAw8jdf6+4pcn+1//r/7v3/6udPP9//x+v/z9N/6//34ZD/f3BwDmcKwo/Z/9/hzteczf9v/5/sP7/oF3380t///p//T+Hza3/z91/77ilyf4HAACADnL33ydusf8BAABgGLn77xu32P8AAAAwjNz918QtTfa//l//r//X/+/r/2/S/+v/l83z/6fp/zfQ/+v/9f/6f7Zqbv1/7v77xS1N9j8AAAB0kLv//nGL/Q8AAADDyN3/gLjF/gcAAIBh5O5/YNzSZP/r//X/+v+l9P/HPP//wPej/9f/r6P/n6b/30D/r//X/+v/2aq59f+5+x8UtzTZ/wAAANBB7v4Hxy32PwAAAAwjd/9D4hb7HwAAAIaRu/+hcUuT/a//1//37v9P7b31ZfT/R/T8f/2//n/hblyd+WeC/v8w/f8GG/r/1Ur/P+W8+/n1395y3v859On/1/0vX/0/682t/8/d/7C45c6r1bGL/SYBAACAWcnd//C4pcnP/wEAAKCD3P3Xxi32PwAAAAwjd/91cUuT/a//1//37v9P0//r/9e9vv5/mTz/f9ql9/+3//973L1v/+/5/9M8/3/b/f9tn4w59v/r6f9ZZ279f+7+6+OWJvsfAAAAOsjd/4i4xf4HAACAYeTuf2TcYv8DAADAMHL3PypuabL/9f+j9f//u+/rzur/92oX/b/+X/+v/x+d/n+a5/9vsPePuZP1S/2//t/z//X/XJq59f+5+x8dtzTZ/wAAANBB7v7HxC32PwAAAAwjd/9j4xb7HwAAAIaRu/9xcUuT/a//H63/3/91nv+v/1/3+vp//f/I9P/T9P8bjPL8/4v81Oy6n79Uu37/+n/9P4fNrf/P3f/4uKXJ/gcAAIAOcvc/IW6x/wEAAGAYufufGLfY/wAAADCM3P1Pilua7H/9v/5/Gf1/voL+X/9/+fv/pP9fJv3/NP3/BqP0/xdp1/380t+//l//z2Fz6/9z9z85bmmy/wEAAKCD3P1PiVvsfwAAABhG7v6nxi32PwAAAAwjd//T4pYm+1//r/9fRv/v+f/6f8//1/+fH/3/NP3/Bvp//b/+X//PVs2t/8/df0Pc0mT/AwAAQAe5+58et9j/AAAAMIzc/c+IW+x/AAAAGEbu/mfGLU32v/5f/6//1//r/9e/vv5/mfT/0/T/G+j/9f/6f/0/WzWj/v+srzqxelbc0mT/AwAAQAe5+58dt9j/AAAAMIzc/c+JW+x/AAAAGEbu/ufGLU32v/5/Nv3/Xs43Vv9/crVa6f9XTfv/k2f99azPpf5f/38EFtT/r/tXh/5f/6//X/D71//r/zlsRv3/3q9z9z8vbmmy/wEAAKCD3P3Pj1vsfwAAABhG7v4XxC32PwAAAAwjd/8L45Ym+1//P5v+f89Y/b/n/x/8fHTq/z3//zD9/9FYUP+/lv5f/6//X+771//r/zlsbv1/7v4X
xU3HrrrobxEAAACYmdz9L45bmvz8HwAAADrI3f+SuMX+BwAAgIW64dDv5O5/adzSZP/r/7fb/x876/f0//r/g58P/b/+X/9/+en/p+n/N9D/6//1//p/tmpu/X/u/pfFLU32PwAAAHSQu//GuMX+BwAAgGHk7n953GL/AwAAwDBy978ibmmy//X/nv+v/9f/6//Xv77+f5n0/9P0/xvo//X/u+3/j5/5j/p/xnAB/f+pU6euvez9f+7+V8YtTfY/AAAAdJC7/1Vxi/0PAAAAw8jd/+q4xf4HAACAYeTuf03c0mT/6/+b9v/5UV9W/3/daqX/1//r//X/0/T/0/T/G+j/9f+e/6//Z6vm9vz/3P2vjVua7H8AAADoIHf/6+IW+x8AAACGkbv/9XGL/Q8AAADDyN3/hrilyf7X/zft/z3/X/+v/z/q/v/Wlf7/SCyi/z957tefe/9/vf5f/z+hXf9/lzvt+6X+X//PYXPr/3P3vzFuabL/AQAAoIPc/W+KW+x/AAAAGEbu/jfHLfY/AAAADCN3/1vipiub7H/9v/5f/6//1/+vf/0jfv7/sdVqpf/fgkX0/xPm3v9v5/n/B/8uP0P/r/9f8vvX/+v/OWxu/X/u/rfGLU32PwAAAHSQu/9tcYv9DwAAAMPI3f/2uMX+BwAAgGHk7n9H3NJk/+v/9f/6f/3/8P3/9Yvo/z3/f0v0/9Pm0f+fm/5f/7/k96//1/9z/nbV/+fuf2fc0mT/AwAAQAe5+98Vt9j/AAAAMIzc/e+OW+x/AAAAGEbu/vfELU32v/5f/38h/X++T/3/WP3/8dn1/yf2/fc1ef6//n9L9P/T9P8b6P/1//r/G/T/bNPcnv+fu/+9cUuT/Q8AAAAd5O5/X9z6v27tfwAAABhG7v73xy32PwAAAAwjd/8H4pYm+1//r//3/H/9//DP/9f/t6L/n6b/30D/r//X/3v+P1s1t/4/d/8H45Ym+x8AAAA6yN3/objF/gcAAIBh5O7/cNxi/wMAAMAwcvffFLc02f/6f/2//l//r/8//ddQ/z8G/f+0o+n/T+r/9f/Vz/9P/F2g/9f/b/p6xjS3/j93/0filib7HwAAADrI3f/RuMX+BwAAgGHk7v9Y3GL/AwAAwCJdueb3cvd/PG5psv/1//p//b/+X/+//vX1/8u0k/4/PxT6f8//D336/9vt+9XSnv9/8N9f+n/9P9s3t/4/d/8n4pYm+x8AAAA6yN3/ybjF/gcAAIBh5O7/VNxi/wMAAMAwcvd/Om5psv/1//p//b/+X/+//vX1/8vk+f/T9P8b6P93+vz8pb9//b/+n8Pm1v/n7v9M3NJk/wMAAEAHufs/G7fY/wAAADCM3P2fi1vsfwAAABjG3u7PuKzh/tf/6//1//p//f/619f/L5P+f5r+fwP9v/5f/6//Z6vm1v9/fu+rTqy+ELc02f8AAADQQe7+L8Yt9j8AAAAMI3f/l+IW+x8AAACGkbv/y3FLk/2v/9f/L6P/P3Xq1LX6f/3//u/nTP9/s/6fov+fpv/fQP+v/9f/6//Zqrn1/7n7vxK3NNn/AAAA0EHu/q/GLfY/AAAADCN3/9fiFvsfAAAAhpG7/+txS5P9r/+fQf9/Qv/v+f/6/5Xn/+v/t0T/P03/v8GI/f+J8//2d93PX6pdv3/9v/6fw+bW/+fu/0bc0mT/AwAAQAe5+78Zt9j/AAAAMIzc/d+KW+x/AAAAGEbu/m/HLU32v/7/6Pr/2/7cdXn+/8nV+vev/9f/6//1/5eb/n+a/n+DEfv/C7Drfn7p71//r//nsLn1/7n7vxO37B9+V13YdwkAAADMSe7+78YtTX7+DwAAAB3k7v9e3GL/AwAAwDBy938/bmmy//X/M3j+/4D9v+f/r/986P9n3f9fof8fg/5/mv5/A/2//l//v6X+Pz/N+v/u5tb/5+7/QdzSZP8DAABAB7n7fxi32P8AAAAwjNz9P4pb7H8AAAAYRu7+m+OWs/b/urZ7FPp//b/+X/+v/1//+vr/ZdL/Tzvf/v/46tL6/6T/1//r/7v2/57/z2lz6/9z9/84bvHzfwAAAFicq87x+7n7fxK32P8AAAAwjNz9P41b7H8AAAAYRu7+n8Utt1yxq7d0pPT/+n/9v/5f/7/+9fX/y6T/n+b5/xvo/7fRz1+t/x+j/1+t9P9curn1/7n7fx63+Pk/AAAADCN3/y/iFvsfAAAAhpG7/5dxi/0PAAAAw8jd/6u4pcn+1/8P3v+fXK1Wl7f/30sz9f+n6f9P0/+vp/8/Gvr/afr/DfT/nv+v//f8f7Zqbv1/7v5fxy1N9j8AAAB0kLv/N3GL/Q8AAADDyN3/27jF/gcAAIBh5O7/XdzSZP/vrP+PP9X6f8//1//r//X/+v9t0v9P0/9voP/X/+v/9f9s1dz6/9z9v49bmux/AAAA6CB3/x/iFvsfAAAAhpG7/49xi/0PAAAAw8jd/6e4pcn+9/x//b/+X/+v/1//+vr/ZdL/T9P/r1d/ofT/+n/9v/6frZpb/5+7/89xS5P9DwAAAB3k7v9L3GL/AwAAwDBy998St9j/AAAAMIzc/X+NW5rsf/2//l//r//X/69/ff3/Mun/p+2y/7/r/21+Wc//33n/n29B/6//1/+zFXPr/3P3/y1uabL/AQAAoIPc/X+PW+x/AAAAGEbu/n/ELfY/AAAADCN3/z/jlib7f0P/f7z+QP3/JP3//vev/1//+dD/6//1/5ef/n+a5/9voP/3/H/9v/6frZpb/5+7/19xS5P9DwAAAB3k7r81brH/AQAAYBi5+/8dt9j/AAAAMIzc/f+JW5rsf8//X1L/f7X+X/+v/9f/6/830P9P0/9voP/X/+v/9f9s1dz6/9z9/w0AAP//bKhVXg==") mount$tmpfs(0x0, &(0x7f00000002c0)='./file0\x00', &(0x7f0000000300), 0x0, 0x0) chdir(&(0x7f0000000140)='./file0\x00') mkdirat(0xffffffffffffff9c, &(0x7f0000000100)='./file0\x00', 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='net_prio.prioidx\x00', 0x275a, 0x0) r1 = fanotify_init(0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='net_prio.prioidx\x00', 0x275a, 0x0) fanotify_mark(r1, 0x101, 0x20, r2, 0x0) fanotify_mark(r1, 0x102, 0x28, r0, 0x0) syz_mount_image$fuse(0x0, &(0x7f0000001040)='./file2\x00', 0x0, 0x0, 0x0, 0x0, 0x0) mount$overlay(0x0, &(0x7f0000000040)='./file0\x00', &(0x7f0000000000), 0x0, 
&(0x7f0000000100)={[{@workdir={'workdir', 0x3d, './file0'}}, {@lowerdir={'lowerdir', 0x3d, '.'}}, {@upperdir={'upperdir', 0x3d, './file2'}}], [], 0x2c}) syz_mount_image$fuse(&(0x7f0000000140), &(0x7f00000001c0)='./file0\x00', 0x40000, &(0x7f0000000380), 0x1, 0x0, 0x0) r3 = socket$unix(0x1, 0x1, 0x0) bind$unix(r3, &(0x7f00000000c0)=@file={0x1, '\xe9\x1fq\x89Y\x1e\x923aK\x00'}, 0x6e) 32.855392171s ago: executing program 1 (id=264): r0 = socket$inet6_tcp(0xa, 0x1, 0x0) accept4(0xffffffffffffffff, 0x0, 0x0, 0x80800) ioctl$sock_SIOCGIFVLAN_SET_VLAN_NAME_TYPE_CMD(0xffffffffffffffff, 0x8982, 0x0) setsockopt$IP6T_SO_SET_ADD_COUNTERS(r0, 0x29, 0x41, &(0x7f0000002600)=ANY=[@ANYBLOB], 0x48) fcntl$F_SET_RW_HINT(r0, 0x406, 0x0) r1 = mq_open(&(0x7f0000000040)='*\x00', 0x40, 0x0, 0x0) accept4(0xffffffffffffffff, 0x0, 0x0, 0x80800) openat$urandom(0xffffffffffffff9c, 0x0, 0x0, 0x0) mq_open(0x0, 0x0, 0x0, &(0x7f0000000100)={0x4, 0x9}) ioctl$FITRIM(0xffffffffffffffff, 0x5452, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, 0x0) ioctl$BTRFS_IOC_DEFAULT_SUBVOL(0xffffffffffffffff, 0x40089413, &(0x7f0000002580)) ioctl$BTRFS_IOC_INO_LOOKUP(r1, 0x5450, 0x0) 32.783138129s ago: executing program 1 (id=265): r0 = openat$audio1(0xffffffffffffff9c, &(0x7f0000000040), 0x0, 0x0) ioctl$SNDCTL_DSP_SETFMT(r0, 0xc0045005, &(0x7f0000000000)=0x20) prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000180)=0x7) r1 = getpid() sched_setscheduler(r1, 0x2, &(0x7f0000000200)=0x7) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000200)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r2, &(0x7f000057eff8)=@abs, 0x6e) sendmmsg$unix(r3, &(0x7f0000000000), 0x651, 0x0) recvmmsg(r2, &(0x7f00000000c0), 0x10106, 0x2, 0x0) sched_setscheduler(0x0, 0x2, &(0x7f0000000200)=0x4) mmap$dsp(&(0x7f0000ffc000/0x4000)=nil, 0x4000, 0x200000f, 0x40a2012, r0, 0x0) 31.944758528s ago: executing program 3 (id=266): r0 = socket$inet_tcp(0x2, 0x3, 0x6) r1 = socket$inet(0x2, 0x4000000000080001, 0x0) setsockopt$IPT_SO_SET_REPLACE(r1, 0x0, 0x40, &(0x7f0000000240), 0x1) mlock2(&(0x7f0000ff8000/0x1000)=nil, 0x1000, 0x1) r2 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x4, &(0x7f0000000180)=ANY=[@ANYBLOB="18010000000000000000000000030000850000007b00000095"], &(0x7f0000000600)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000040)={&(0x7f0000000600)='tlb_flush\x00', r2}, 0x10) r3 = bpf$MAP_CREATE(0x0, &(0x7f0000000180)=@base={0xb, 0x7, 0x10001, 0x9, 0x1}, 0x48) r4 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPSET_CMD_CREATE(r4, &(0x7f0000001080)={0x0, 0x0, &(0x7f0000000080)={&(0x7f0000000000)={0x6c, 0x2, 0x6, 0x201, 0x0, 0x0, {}, [@IPSET_ATTR_TYPENAME={0x10, 0x3, 'bitmap:port\x00'}, @IPSET_ATTR_REVISION={0x5}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz2\x00'}, @IPSET_ATTR_FAMILY={0x5}, @IPSET_ATTR_PROTOCOL={0x5, 0x1, 0x6}, @IPSET_ATTR_DATA={0x24, 0x7, 0x0, 0x1, [@IPSET_ATTR_CADT_FLAGS={0x8, 0x8, 0x0}, @IPSET_ATTR_PORT={0x6}, @IPSET_ATTR_PORT_TO={0x6}, @IPSET_ATTR_TIMEOUT={0x8}]}]}, 0x6c}}, 0x0) r5 = bpf$PROG_LOAD(0x5, &(0x7f00000000c0)={0x11, 0xd, &(0x7f0000000280)=ANY=[@ANYBLOB="18010000000000000000000000000000850000006d00000018110000", @ANYRES32=r3, 
@ANYBLOB="0000000000000000b7080000000000007b8af8ff00000000bfa200000000000007020000f8ffffffb703000008000000b704000000000000850000000300000095"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x90) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000001c0)={&(0x7f0000000000)='tlb_flush\x00', r5}, 0x10) bpf$MAP_UPDATE_CONST_STR(0x2, &(0x7f00000007c0)={{r3}, &(0x7f0000000740), &(0x7f0000000780)='%ps \x00'}, 0x20) bpf$MAP_CREATE(0x0, &(0x7f00000009c0)=@base={0x5, 0x1, 0x8, 0x8}, 0x48) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0', [{}], 0xa, "574af8bb1896a80b7daea4e12ad57525cbc81bd0936a5189eb66195a8ba53d0995077efd555a693b482c816ea6216067d24b152a70c55905f84c3513ea21f0efdeb0419de5c217"}, 0x53) 31.942315809s ago: executing program 0 (id=267): prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x100008b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) r0 = getpid() sched_setscheduler(r0, 0x1, &(0x7f0000000100)=0x5) socketpair$unix(0x1, 0x3, 0x0, &(0x7f0000001480)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r1, &(0x7f000057eff8)=@abs, 0x6e) sendmmsg$unix(r2, &(0x7f00000bd000), 0x318, 0x0) recvmmsg(r1, &(0x7f00000000c0), 0x10106, 0x2, 0x0) socket$nl_xfrm(0x10, 0x3, 0x6) syz_mount_image$hfsplus(&(0x7f00000008c0), &(0x7f0000000100)='\x13\x13w\xc5\xfc5\xd4\x14T\xd5\xd4\x1d)\xad\x1a`)Y\x81F\xe6\xbe\x16nA\xad\r\xbd@T\x03<\x9f3\xbb\xda\x82$\xa2\xf3\xd7r\xe7cnH\xb3<\xbfp\x83r\xe8\xf1\xb9\x93>\xc5\x12wC\xbe\"\x06 \x9e\xf0-\xf9\xcb\xf2\xf6\xe8\x80\xd38/\x00', 0x0, &(0x7f0000006780)={[{@nls={'nls', 0x3d, 'macturkish'}}, {@barrier}, {@uid}, {@gid={'gid', 0x3d, 0xee01}}, {@creator={'creator', 0x3d, "ca30c7e7"}}, {@nobarrier}]}, 0x3, 0x6ab, &(0x7f00000009c0)="$eJzs3U2InHcdB/DvM5nMZiKk2zZNowhZGijaYLKboSaC0CgiOQQJeul1STbNkklaNltJi5iJWgVPnqQHDxWJh55ERKgnsZ4FwYun3APePOSgrjzPPDM7uzvZ7CbZnW37+cCzz/+Z/9vv+fV5mXmmYQJ8Zp1/Pft7KXL+xIVb5fa9u53uvbud64NykqkkjaTZX6VoJ8XHybn0l3y+fLEernjYPK/e/6hovv9hp7/VrJeqfWOzfhuMbdlLDgw39iWZ6Rf/s4UBG+PHq5ZqnEur4z2mYhh3mbDjg8TBpK1s0FutHHdqrLX18xbYs27375sbTCcH07+7lu8DUl8dHn1lmLxNr0293YsDAAAAdsrYz/KjnnmQB7mVQ7sTDgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHw6FP3fDCzqpTEoz6QY/P5/a+Q39VsTDvcJvXelWn33mUkHAgAAAAAAAABP5NiDPMitHBpsrxTVd/4vVRuHq7+fy9u5mYUs5WRuZT7LWc5S5pJMjwzUujW/vLw0t7HnL1P2XFlZuV33PD225+m1cfXWBzru/zTY0AgAAAAAAAAAPrN+lPOr3/8DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMBeUCT7+qtqOTwoT6fRTHIgSauYGTZvTTTYp+DPkw4AAAAAdl67Xh8q/tcvrBTVZ/4j1ef+A3k7N7KcxSynm4Vcrp4F9D/1N/7e63Tv3e1cL5eNA3/jX9uKoxox/WcP42eerVq8MOxxPt/O93IiM7mYpSzm+5nPchYyk29VpfkUma6fXkzfu9vOINaN8Z5bs3VxfWzHRsplfEerSNq5ksUqtpO51BqE3qjbHR2Z7Y+tZN2Md8rsFK/Vtpijy/W63KNf1Ou9Ybra8/3DjMzWuS+z8exo3jfmfpvHyfqZ5tIYPoM6vDpLubl+psfK+cF6Xeb6pzub820+Slubid7Py63B0Xdk85wnX/7HXy5ebdy4dvXKzRN75zB6TOuPic5IJl7cUia6ZSZ6T5CJA08S/9PTqrPRv4pu72r5UtX3UBbznbyZy1nImcxmLmczm6/ldDo5PZLXFzbPa3WuNbZ3rh3/Ul0o70k/G7k37Zqph1WUeX12JK+jV7rpqm70ldUsPbeFLBWtjM/SP8eG0vxCXSjn+PHIHWfy1mdibiQTz2+eiV//dyXJze6Na0tX59/a4nwv1+vytH1v7bX5N09lh7av3t3yeHmu/I+V/m1j9Ogo654f1K3LV6v+xqVZD7amrpXqfO7XPepMLUc6cmfcSP26F8fO0qnqjo7UrXmXkzfTHb4Lqe36SQrAFhx85WCrfb/9t/YH7Z+0r7YvHPjm1NmpL7ay/6/NP+37XeO3ja8Xr+SD/DCHJh0pAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8Gtx8591r893uwtIeLKTxlAe8M7ZqkIr+K60xbY5lD2Tjk1GY2uyI+n2STbq3JhFzO8meSF2auzDXVMZUXRi+0k4aw3iSXNsjP3AH7IRTy9ffOnXznXe/snh9/o2FNxZunD575rUzna/O3T51ZbG7MNv/O+kogZ2w+jZg0pEAAAAAAAAAAAAAW7Ub/7xhzLRFbwL7CgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHwynX89+3spMjd7crbcvne30y2XQXm1ZTNJI0nxg6T4ODmX/pLpkeGKh83z6v2PfvXy+x92VsdqDto31vX7w79XVra5F716yUySffX60aa2NN6lkfF62wysrxjuYZmw44PEwaT9PwAA//+APAcg") getdents64(0xffffffffffffffff, &(0x7f0000000f80)=""/4096, 0x1035) bpf$BPF_PROG_TEST_RUN(0xa, 0x0, 0x0) r3 = openat$smackfs_cipso(0xffffffffffffff9c, &(0x7f0000000000)='/sys/fs/smackfs/cipso2\x00', 0x2, 0x0) readv(r3, &(0x7f0000000a00)=[{&(0x7f0000000740)=""/66, 0x42}], 0x1) 31.869631397s ago: executing program 1 (id=268): r0 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r2, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r1, @ANYBLOB="2c0433"], 0x448}}, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f0000000000), 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r4, 0x0) preadv(r4, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) write$sndseq(r3, &(0x7f00000003c0), 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000240), 0x3af4701e) sendfile(r5, r3, 0x0, 0x10000a007) 31.064270304s ago: executing program 0 (id=269): openat$binderfs(0xffffffffffffff9c, 0x0, 0x2, 0x0) prlimit64(0x0, 0xe, 0x0, 0x0) r0 = getpid() sched_setaffinity(0x0, 0x0, 0x0) sched_setscheduler(r0, 0x2, &(0x7f0000000200)=0x6) pipe2(&(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}, 0x0) fcntl$setpipe(r1, 0x407, 0x10005) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, 0x0) sendmmsg$unix(0xffffffffffffffff, 0x0, 0x0, 0x0) recvmmsg(0xffffffffffffffff, 0x0, 0x0, 0x2, 0x0) bpf$MAP_CREATE(0x0, 0x0, 0x0) bpf$PROG_LOAD(0x5, 0x0, 0x0) syz_emit_vhci(&(0x7f0000000280)=ANY=[@ANYBLOB="042c"], 0x14) 30.740743824s ago: executing program 0 (id=270): bind$inet(0xffffffffffffffff, &(0x7f0000000080)={0x2, 0x0, @local}, 0x10) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000002100), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000002140)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_DEL_STATION(r1, &(0x7f0000002240)={0x0, 0x0, &(0x7f0000002200)={&(0x7f00000005c0)={0x1c, r2, 0xa29, 0x0, 0x0, {{}, {@val={0x8, 0x3, r3}, @void}}}, 0x1c}}, 0x0) r4 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000180)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_FRAME(r5, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000340)={0x54, r6, 0x1, 
0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}, [@NL80211_ATTR_FRAME={0x36, 0x33, @beacon={{{}, {}, @device_b, @device_b}, 0x0, @random, 0x0, @void, @void, @val={0x3, 0x1}, @void, @void, @val={0x5, 0x3, {0x0, 0x3c}}, @void, @void, @val={0x3c, 0x4, {0x0, 0xd}}, @void, @void, @void, @void}}]}, 0x54}}, 0x0) close(0x3) bpf$PROG_LOAD(0x5, 0x0, 0x0) bpf$MAP_CREATE(0x0, &(0x7f00000009c0)=@base={0x19, 0x4, 0x4, 0x1}, 0x93) 30.639207552s ago: executing program 3 (id=271): prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) r0 = getpid() sched_setscheduler(r0, 0x2, &(0x7f0000000200)=0x4) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000200)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r1, &(0x7f000057eff8)=@file={0x0, './file0\x00'}, 0x6e) sendmmsg$unix(r2, &(0x7f0000000000), 0x651, 0x0) recvmmsg(r1, &(0x7f00000000c0), 0x10106, 0x2, 0x0) sched_setparam(0x0, &(0x7f0000000280)=0x8) madvise(&(0x7f0000c00000/0x400000)=nil, 0x400000, 0xe) madvise(&(0x7f0000e3a000/0x2000)=nil, 0x2000, 0x17) madvise(&(0x7f0000f21000/0x8000)=nil, 0x8000, 0x8) 29.736245058s ago: executing program 3 (id=272): openat$fb0(0xffffffffffffff9c, &(0x7f0000000040), 0x0, 0x0) sched_setscheduler(0x0, 0x1, 0x0) prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) sched_setscheduler(0x0, 0x2, &(0x7f0000000200)=0x6) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000200)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r0, &(0x7f000057eff8)=@abs, 0x6e) sendmmsg$unix(r1, &(0x7f0000000000), 0x651, 0x0) recvmmsg(r0, &(0x7f00000000c0), 0x10106, 0x2, 0x0) connect$inet6(0xffffffffffffffff, &(0x7f0000000100)={0xa, 0x0, 0x0, @mcast2}, 0x1c) sendmsg$TIPC_NL_LINK_GET(0xffffffffffffffff, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000700)=ANY=[@ANYRES16, @ANYBLOB="030700000000000000000800000068000480040007801300010062726f6164636173742d"], 0x7c}}, 0x0) close(0x3) ioctl$TUNATTACHFILTER(0xffffffffffffffff, 0x401054d5, &(0x7f0000000040)={0x0, 0x0}) 28.712759317s ago: executing program 3 (id=273): prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) r0 = getpid() sched_setscheduler(r0, 0x2, &(0x7f0000000200)=0x6) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000580)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r1, &(0x7f000057eff8)=@abs, 0x6e) sendmmsg$unix(r2, &(0x7f0000000000), 0x651, 0x0) recvmmsg(r1, &(0x7f00000000c0), 0x10106, 0x2, 0x0) syz_open_dev$tty1(0xc, 0x4, 0x1) r3 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_TIMEOUT_NEW(r3, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000380)={&(0x7f0000000440)=ANY=[@ANYBLOB="44000000000801010000000000000000000000000500030000000000050003002f0000000900010073797a31000000000600024000"], 0x44}}, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, 0x0, 0x0) socket$inet6_tcp(0xa, 0x1, 0x0) 27.852246699s ago: executing program 3 (id=274): gettid() r0 = socket$inet6(0xa, 0x2, 0x0) setsockopt$sock_int(r0, 0x1, 0x10, &(0x7f0000000240), 0x4) openat$hwrng(0xffffffffffffff9c, &(0x7f0000000000), 0x0, 0x0) openat$cuse(0xffffffffffffff9c, &(0x7f0000000040), 0x2, 0x0) syz_mount_image$ext4(&(0x7f0000000200)='ext4\x00', 
&(0x7f0000000740)='./file0aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\x00', 0x10400, &(0x7f00000006c0), 0xfe, 0x246, &(0x7f0000000840)="$eJzs3T9oM2UcB/DvXRJf+75BXnURxD8gIloor5vg8rooFKQUEUGFioiL0gq1xa1xcnHQWaWTSxE3q6N0KS6K4FS1Q10ELQ4WBx0iybVS24ja1Jz0Ph+43l3vee73HLnvkyyXBGisq0muJ2klmU7SSVIcb3B3tVw93F2f2l5I+v0nfiqG7ar9ylG/K0l6SR5KslUWeamdrG4+s/fLzmP3vbnSuff9zaenJnqRh/b3dh8/eG/ujY9mH1z94qsf5opcT/dP13X+ihH/axfJLf9Fsf+Jol33CPgn5l/78OtB7m9Ncs8w/52UqV68t5Zv2OrkgXf/qu/bP355+yTHCpy/fr8zeA/s9YHGKZN0U5QzSartspyZqT7Df9O6XL68tPzq9ItLK4sv1D1TAeelm+w++smlj6+cyP/3rSr/wMU1yP+T8xvfDrYPWnWPBpiIO6rVIP/Tz63dH/mHxpF/aC75h+aSf2gu+Yfmkn9oLvmHC6xztNEbeVj+obnkH5pL/qG5jucfAGiW/qW6n0AG6lL3/AMAAAAAAAAAAAAAAAAAAJy2PrW9cLRMquZn7yT7jyRpj6rfGv4ecXLj8O/ln4tBsz8UVbexPHvXmCcY0wc1P31903f11v/8znrrry0mvdeTXGu3T99/xeH9d3Y3/83xzvNjFviXihP7Dz812fon/bZRb/3ZneTTwfxzbdT8U+a24Xr0/NM9/hXLZ/TKr2OeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgIn5PQAA//8PK23M") r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='memory.events.local\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000001400), 0x208e24b) fdatasync(r1) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) madvise(&(0x7f0000000000/0x600000)=nil, 0x600000, 0x1) openat$dir(0xffffffffffffff9c, &(0x7f0000002a00)='./file0\x00', 0x40, 0x0) madvise(&(0x7f0000000000/0x600000)=nil, 0x600003, 0x15) preadv(r1, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xfdef}], 0x1, 0x0, 0x0) 27.727336805s ago: executing program 3 (id=275): r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000003c0)={0x0, 0x0, &(0x7f0000000380)={0x0, 0x24}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x2ba) bpf$PROG_LOAD(0x5, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000140)=ANY=[@ANYBLOB="3c0000001000850619fbb7c75150926b00000000", @ANYRES32=r2, @ANYBLOB="fe000000000000001c0012000c000100626f6e64000000000c0002000800010004"], 0x3c}}, 0x0) socket$nl_netfilter(0x10, 0x3, 0xc) r3 = socket$nl_route(0x10, 0x3, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r4, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000080)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000140012800b00010062617461647600000400028008000a00", @ANYRES32=r6], 0x3c}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f00000004c0)=ANY=[@ANYBLOB="2000000011005504000000000000000010000000", @ANYRES32=r7], 0x20}}, 0x0) 4.080447148s ago: executing program 2 (id=288): sendmsg$alg(0xffffffffffffffff, 0x0, 0x20000000) recvmmsg(0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0) setsockopt$inet_int(0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0) socket$inet(0x2, 0x3, 0x5) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, 0x0, 0x0) setsockopt$bt_BT_VOICE(0xffffffffffffffff, 0x112, 0xb, &(0x7f0000000000)=0x63, 0x2) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) r0 = socket$tipc(0x1e, 0x5, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) 
syz_genetlink_get_family_id$ethtool(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_CHANNELS_SET(0xffffffffffffffff, 0x0, 0x0) bind$tipc(r0, &(0x7f00000001c0)=@name={0x1e, 0x2, 0x3, {{0x41, 0xfffffffe}}}, 0x10) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$tipc(&(0x7f0000000940), 0xffffffffffffffff) sendmsg$TIPC_CMD_SET_NODE_ADDR(r1, &(0x7f0000000a00)={0x0, 0x0, &(0x7f00000009c0)={&(0x7f0000000980)={0x24, r2, 0x1, 0x0, 0x0, {{}, {}, {0x8, 0x11, 0xffffffff}}}, 0x24}}, 0x0) 3.957287124s ago: executing program 2 (id=290): bpf$MAP_CREATE_CONST_STR(0x0, &(0x7f0000000000), 0x48) socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) socket$inet6_sctp(0xa, 0x801, 0x84) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000500)) r0 = socket$packet(0x11, 0x3, 0x300) socket$packet(0x11, 0x3, 0x300) socket$packet(0x11, 0x3, 0x300) setsockopt$packet_int(r0, 0x107, 0xf, &(0x7f0000006ffc)=0x4000000000000200, 0x4) socketpair(0x1, 0x20000000000001, 0x0, &(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) bind$packet(r0, &(0x7f0000000000)={0x11, 0x0, r2, 0x1, 0x0, 0x6, @link_local}, 0x14) sendto$inet6(r0, &(0x7f0000000280)="020409fcec074802010e0200c52cf7c20675e005b02f0800eb2b2ff0dac8897c6b112002faffffff3066090cb600c5471d130a66321a54e7df305f80a88161b6fd8f24286a57c3feffff", 0xfc13, 0x800, 0x0, 0x2f) 3.954956434s ago: executing program 4 (id=291): bpf$MAP_CREATE(0x0, &(0x7f00000009c0)=@base={0x0, 0x0, 0x0, 0xc, 0x4302}, 0x48) prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x20000008b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) r0 = getpid() sched_setscheduler(r0, 0x2, &(0x7f0000000200)=0x7) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000200)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r1, &(0x7f0000000180)=@abs, 0x6e) sendmmsg$unix(r2, &(0x7f00000bd000), 0x318, 0x0) recvmmsg(r1, &(0x7f00000000c0), 0x10106, 0x2, 0x0) sched_setscheduler(0x0, 0x2, &(0x7f0000000200)=0x4) r3 = openat$vhost_vsock(0xffffffffffffff9c, &(0x7f00000015c0), 0x2, 0x0) syz_mount_image$fuse(0x0, &(0x7f00000000c0)='./file0\x00', 0x0, 0x0, 0x0, 0x0, 0x0) ioctl$VHOST_SET_VRING_BASE(r3, 0xaf01, 0x0) ioctl$VHOST_SET_MEM_TABLE(r3, 0x4008af03, &(0x7f00000004c0)={0x3c}) 3.881390733s ago: executing program 2 (id=292): r0 = openat$binderfs(0xffffffffffffff9c, &(0x7f00000000c0)='./binderfs/binder0\x00', 0x0, 0x0) ioctl$BINDER_SET_CONTEXT_MGR_EXT(r0, 0x4018620d, &(0x7f0000000540)) r1 = openat$binderfs(0xffffffffffffff9c, &(0x7f0000000180)='./binderfs/binder0\x00', 0x0, 0x0) ioctl$BINDER_WRITE_READ(r1, 0xc0306201, &(0x7f00000003c0)={0x8, 0x0, &(0x7f0000000340)=[@acquire], 0x0, 0x0, 0x0}) r2 = dup3(r1, r0, 0x0) r3 = syz_io_uring_setup(0x2ddd, &(0x7f00000006c0)={0x0, 0x0, 0x10100}, &(0x7f0000000100), &(0x7f0000000140)=0x0) syz_io_uring_setup(0x991, &(0x7f0000000080)={0x0, 0xb708, 0x0, 0x4}, &(0x7f0000000180)=0x0, &(0x7f0000000380)) syz_io_uring_submit(r5, r4, &(0x7f00000001c0)=@IORING_OP_POLL_ADD={0x6, 0x0, 0x0, @fd_index=0x4}) io_uring_enter(r3, 0x381b, 0x0, 0x0, 0x0, 0x0) r6 = openat$binderfs(0xffffffffffffff9c, &(0x7f0000000040)='./binderfs/binder0\x00', 0x0, 0x0) mmap$binder(&(0x7f0000ffc000/0x3000)=nil, 0x3000, 0x1, 0x11, r6, 0x0) setsockopt$MRT_INIT(0xffffffffffffffff, 0x0, 0xc8, 0x0, 0x0) ioctl$BINDER_SET_CONTEXT_MGR_EXT(r6, 0x4018620d, &(0x7f00000001c0)) ioctl$BINDER_WRITE_READ(r2, 0xc0306201, 
&(0x7f0000000480)={0x4c, 0x0, &(0x7f0000000580)=[@acquire, @transaction={0x40406300, {0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}], 0x0, 0x0, 0x0}) ioctl$BINDER_WRITE_READ(r0, 0xc0306201, &(0x7f0000000080)={0x4c, 0x0, &(0x7f00000004c0)=[@reply_sg={0x40486312, {0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}], 0x4c, 0xfffffffffffffffc, &(0x7f0000000600)="fd026d6e8c6d7d4b8232398be4d8ab1fb3f7357c1094506013470098448866805968ba3387f5782010171d4f65cc2298be76206507000000000000008f50e600"/76}) 3.229984298s ago: executing program 2 (id=293): socket$netlink(0x10, 0x3, 0x0) write$binfmt_script(0xffffffffffffffff, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) syz_open_dev$sndpcmc(&(0x7f0000004240), 0x0, 0x0) bpf$MAP_CREATE(0x0, &(0x7f0000000100)=@base={0x15, 0x10, 0x2}, 0x48) fsopen(&(0x7f0000000040)='binfmt_misc\x00', 0x0) r0 = seccomp$SECCOMP_SET_MODE_FILTER_LISTENER(0x1, 0x0, &(0x7f0000000000)={0x1, &(0x7f0000000100)=[{0x6}]}) ioctl$AUTOFS_DEV_IOCTL_ASKUMOUNT(0xffffffffffffffff, 0xc018937d, &(0x7f0000000180)={{0x1, 0x1, 0x18}, '\x00'}) r1 = openat$ptmx(0xffffffffffffff9c, &(0x7f0000000140), 0x0, 0x0) ioctl$TIOCSETD(r1, 0x5423, &(0x7f0000000300)=0xe) r2 = openat$ppp(0xffffffffffffff9c, &(0x7f0000000040), 0x0, 0x0) ioctl$EVIOCGPROP(r2, 0x40047438, &(0x7f0000000180)=""/246) socket$inet6_tcp(0xa, 0x1, 0x0) pselect6(0x40, &(0x7f00000001c0), 0x0, &(0x7f00000002c0)={0x3ff}, 0x0, 0x0) close_range(r0, 0xffffffffffffffff, 0x0) 3.03049085s ago: executing program 4 (id=294): r0 = socket$kcm(0x10, 0x3, 0x10) prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) r1 = getpid() sched_setscheduler(r1, 0x2, &(0x7f0000000200)=0x4) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000200)={0xffffffffffffffff, 0xffffffffffffffff}) connect$unix(r2, &(0x7f000057eff8)=@abs, 0x6e) sendmmsg$unix(r3, &(0x7f0000000000), 0x651, 0x0) recvmmsg(r2, &(0x7f00000000c0), 0x10106, 0x2, 0x0) sendmsg$kcm(r0, &(0x7f0000000000)={0x0, 0xd18c9b25, &(0x7f0000000080)=[{&(0x7f0000000040)="c00e03002a000b05d25a806c8c6f94f90424fc601100077a0a000312050282c137153e370e0c1180fc0b0c000300", 0x33fe0}], 0x1}, 0x0) syz_mount_image$f2fs(&(0x7f0000000040), &(0x7f0000000000)='./file2\x00', 0x0, &(0x7f0000000100)=ANY=[@ANYBLOB='nobarrier,mode=lfs,fsync_mode=strict\x00acl,\x00'], 0x1, 0x5509, 
&(0x7f0000000500)="$eJzs3E1rY+UXAPCTdjrv//kXceFuLgxCC5Mw6cugu6oz+IIdyqgLV5omachMkluaNK1duXApLvwmouDKpZ/BhWt34kJxJyi591anvoBDk8a2vx/cnPs8eXLuecIwcO4tCeDcmk9+/rEUN+JKRMxGxPWI7LxUHJm1PDwXETcjYuaJo1TM/z5xMSKuRsSNUfI8Z6l469Pbw1urP7zx01ffXLpw7bMvv53eroFpez4iutv5+V43j2krj4+K+dqwncXuyrCI+Rvdx8U4zeNeczPLsFc7XFfL4nIrX59u7/ZHcatTq49iq72VzW/38gv2h63DPNkHHtV2snGjuZnFdj/NYusgr2v/IP+/7aA/yPM0inwfZOljMDiM+Xxzv5nvZ/txFuu9QTGf500bzf1RHBaxuFzU004jq2PzON/0f9ub7d7ufjJs7vTbaS9ZrVRfqFTvlqs7aaM5aK6Ua93G3ZVkodUZLSsPmrXuWitNW51mpZ52F5OFVr1erlaThXvNzXatl1SrleXKnfLqYnF2O3n1wTtJp5EsjOLL7d7uoN3pJ1vpTpJ/YjFZqiy/uJjcqiZvrW8kGw/v31/fePu9e+8+eGn99VeKRX8pK1lYurO0VK7eKS9VF8/R/j8qih7j/uFYSk+3/NKk6gA4RfT/wDRMrv/feRgx+f4/9P9jcar63/Pe/09g/3AsT9n/AwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwdnw39/lr2cl8Pr5WzP+vmHqmGJciYiYifv0bs3HxSM7ZIs/cP6yf+1MNX5ciyzC6xqXiuBoRa8Xxy/8n/S0AAADA2fXFhzc/ybv1/GV+2gVxkvKbNjPX3x9TvlJEzM1/P6ZsM6OXZ8e
ULPv3fSH2x5Qtu4F1eUzJ8ltuF8aV7V+ZPRIuPxFKeZg50XIAAIATcbQTONkuBAAAgJP08bQLYDpKcfgo8/BZcPaX9388ELxyZAQAAACcQqVpFwAAAABMXNb/+/0/AAAAONvy3/8DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB+Y+d+bhMHojgAPxu8sP+0aLX3bWVvUMaWsMc9RhSQJiiBtJAGqIHcUkIEER4HhYhEkTy2FfR9khnGgh8zCA7zRhoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADo0l21Xtxc/b5um7Pbt5NnNgAAAMA522q9qJ/MUv9rc/97c+tn0y8iooyIc2v3UXw6yRw1OdUrr69ejOE2ok44fMakub5ExJ/mevjR9bcAAAAAl2uzXM3Taj09zIYeEH1KRZvy299MeUVEVLP7TGnlIe9XprD69z2O/5nS6gLWNFNYKrmNc6W9S/13P1btps+aIjXl2+/PNncAAKBHo5Om31UIAAAAffo39AAYRhFPW5nHrcBJaprtvc8nPQAAAOADKoYeAAAAANC5ev3v/D8AAAC4bOn8PwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALq0rdaLzXI1b5uz27eTZzYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj+zPOwqEQBiEwd71ncnc/7DSoKmpSRUIH39jMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAb373l/8TU+NMMvfaWHoeSdZOja1TY+/cOPrD+Po1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwsT+3KQCCQBgGt7Kvf+H9Dxsv6BkimAHhYRcFAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPiRe8YyTq0ztsRTVS3TxJ5xr6ojq8SZVeLKhT4ebP3DHwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC879/MaRxUHAPzNzM42rYprlBwiYsGDXmy6ra29iQclePBPEEK6rbFbf7Q52FKEXLxJzr2IHkUEJd76P/TcQi/11sMeKnhW5lfy0gZchc5ssp8PvHnfGYZ53zcLId99LwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGCfyft7cVYcBlWc1tfuPb61XvT3n+gLd7YfLBetiJM2kz4cXotPkqXoZKH9ZAAAAJgPWVPfhxAe5jurRZ8Oyvo/b+4pav4fXqjipp5/su5v+qb2L9rvvz16ZXegQTVO8dBLG+PR6adT6T27Wc62F//1jl755svvXrLyA0k/2np5kpfvM/nu7t0P+mV4rI1sAYD/41TT10Hz+1DRD7tMDIC50YsK76b+zwbd5gQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQhslWeK6JkxDCcm8vLtx/fGv9oP7O9oPlpp2/fXs7fmbxiDyEcGljPDodFlqczWy7fuPmlbXxeHSt/eD1EEJXo79XT//KJ1PcHMIzSuNkR29+zoK0/rBnJZ/DEXT4QwkAgCMpr1tR1z/Md1aLa8liCH//uL/+fzOKw5T1/6NPz9+Lx4rr/2FrM5x9K5tXv1y5fuPm2xtX1y6PLo8+f+fM8N3h2Qvnzl1YKb8rqY5dpwkAAMAh1q9bXP+ni0+v/5+I4jBl/f/V98Nv4rEy9f+B9hb9us4EAABgvr108q8/kwOuJ/1++Hptc/PasDrunp+pjh2k+p8dq1tc/2eLXWcFAAAAtGGylexb/78YxWHK9f/nf3r1l/iZWQjheL3+f2r9i/HF9qYz09r4c+Ku5wgAAEC3jtctXv/Py/3/6e6WhzSE8NYbVVz/G8Cp6v/sw29/jseK9/+fbW+KMyldqt5H2S+F0FvqOiMAAACOsoWyDcr6/498Z/WzX0983Lf/HwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKBt/wQAAP//LVo+Qw==") r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='pids.current\x00', 0x275a, 0x0) fadvise64(r4, 0x0, 0xfffffffffffffffd, 0x2) accept4(0xffffffffffffffff, 0x0, 0x0, 0x0) 2.10871174s ago: executing program 2 (id=295): prlimit64(0x0, 0xe, &(0x7f0000000140)={0x8, 0x8b}, 0x0) sched_setscheduler(0x0, 0x1, &(0x7f0000000080)=0x7) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xb635773f06ebbeee, 0x8031, 0xffffffffffffffff, 0x0) socketpair$unix(0x1, 0x2, 0x0, 0x0) sched_setaffinity(0x0, 0x8, &(0x7f0000000040)=0x10001) r0 = openat$hwrng(0xffffffffffffff9c, &(0x7f00000002c0), 0x0, 0x0) preadv(r0, &(0x7f0000000240)=[{&(0x7f0000033a80)=""/102386, 0xfffffd6e}], 0x1, 0x0, 0x0) connect$inet6(0xffffffffffffffff, &(0x7f0000000100)={0xa, 0x0, 0x0, @mcast2}, 0x1c) ioctl$UFFDIO_API(0xffffffffffffffff, 0xc018aa3f, 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) syz_emit_ethernet(0x6a, &(0x7f0000000000)={@local, @link_local, @void, {@ipv4={0x800, @udp={{0x5, 0x4, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x11, 0x0, @empty, @empty}, {0x0, 0x4e20, 0x48, 0x0, @wg=@cookie={0x3, 0x0, "6d4dfdeb8cf7bbfe143803bec2ce783e04cd32308cdd8dde", "c71cb8adfce542a4bc5a026c208fd0c45787e4aa384e3d26b21ea41cc128364c"}}}}}}, 0x0) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000140)={0x6, 0x3, &(0x7f0000000680)=ANY=[@ANYBLOB="1800000002000000000000000000000095"], 0x0}, 0x90) r1 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000480)={0x6, 0x3, &(0x7f0000000680)=ANY=[], &(0x7f00000002c0)='syzkaller\x00'}, 0x90) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000600)={r1, 0x5, 0xb68, 0x0, &(0x7f0000000000)='%', 0x0, 0xd01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) 1.773113399s ago: executing program 4 (id=296): bpf$MAP_DELETE_ELEM(0x2, 0x0, 0x0) r0 = gettid() timer_create(0x0, &(0x7f0000000140)={0x0, 0x21, 0x800000000004, @tid=r0}, &(0x7f0000bbdffc)) bpf$MAP_CREATE(0xb00000000000000, 0x0, 0x0) timer_settime(0x0, 0x0, &(0x7f0000000000)={{0x0, 0x989680}, {0x0, 0x3938700}}, 0x0) pipe2(0x0, 0x10800) fsconfig$FSCONFIG_SET_PATH_EMPTY(0xffffffffffffffff, 0x4, &(0x7f00000000c0)='acl\x00', 0x0, 0xffffffffffffff9c) dup(0xffffffffffffffff) syz_mount_image$hfs(&(0x7f0000000540), &(0x7f00000000c0)='./file0\x00', 0x3004002, &(0x7f00000004c0)=ANY=[@ANYBLOB="747970653d704351852c636f6465706167653d63703836332c00000000746f723db84f85572c6469725f00000000010000003030303030313030303030303030303030303030313000"], 0x1, 0x2ce, 
&(0x7f0000000140)="$eJzs3T1v00Acx/HfOUmbPqi4DwiJBVSoBEtFgQGxBKGs7EwIaFKpIiqiLRKwUBAj4gWw8xZ4ESwg3gBMTLyAbkZ3vjhO6zgNNDFpvx8pkXO+s/+ns3P3t9RGAE6tu/Ufn278si8jlVSSdFsKJFWlsqSzOld9vrW7udtqNvIOVHIt7MsobmkO1VnfamY1te1cCy+0n8qaTZdhOKIouvOz6CBQOHf3ZwikSX8fuv3VEcc1LHvShaJjGLX0AJt97euF5goMBwDwH/Dzf+CniVlXZBQE0oqf9k/U/L9fdADH62brUFGU2yA1/7vVXWTs+J5xuzr5nkvh7P6gnSUeJZjKgc8Tiq+srgWm6ZdVuliCqY3NslbX36gR6K1qXqrakntvxJduW59olzNy0xy9j1bRvem4N25FeVA7pI3NVnPSbmTEvzjYGf+d+WK+mQcm1Ec1kvVfOTJ2mNxIhQdGKqjY+K/1PuKMa2Vryaf9tVot6Koy705y3p/B69PLanZGovYVNd95aLCXRJAXp2u1oO7HCnHv1vq0WsxqFSaferRa6mpV8lfC6vrTVu6jlOFod9F8MPfNsn7rs+qp9X9g41vRUe5MW8fV9FdG3J+J7JplVzM8NHN0bpeLSQTe5OCdw6De67FuaW7n5asnpVaruW03HmVsPJvdNr6k8k7KrDP8jZJy6mivUxJZr6PoqEeOhhn81WM9oP3+SErs7ZNV2d5lSUkw6mE6LRv1r8q7IMdnI4qkHruK/oLCKOyY9qD7gqmCA8Ko2XWXifM/t5L3qzqXItm3MGednp9kquuIa0kG170UXHDv0wNlcDO9M7jUGa/3yBldznXpinQ5VWiUe8bQx3lCmLq+6yHP/wEAAAAAAAAAAAAAAAAAAMbNKP7SoOg+AgAAAAAAAAAAAAAAAAAAAAAw7v7q93+z/ke8+/3fkN//BcbInwAAAP//0gN5ig==") chdir(&(0x7f0000000140)='./file0\x00') r1 = syz_open_dev$ndb(&(0x7f0000000000), 0x0, 0x0) socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000080)={0xffffffffffffffff}) ioctl$NBD_SET_SOCK(r1, 0xab00, r2) ioctl$NBD_DO_IT(r1, 0xab03) ioctl$NBD_CLEAR_SOCK(r1, 0xab04) 1.196000995s ago: executing program 0 (id=277): bpf$MAP_GET_NEXT_KEY(0x2, 0x0, 0x0) r0 = socket$inet6(0xa, 0x80002, 0x0) setsockopt$inet6_mreq(r0, 0x29, 0x1b, &(0x7f0000000100)={@empty}, 0x14) r1 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x0, 0x0) r2 = openat$tun(0xffffffffffffff9c, &(0x7f0000000240), 0x0, 0x0) ioctl$TUNSETIFF(r2, 0x400454ca, &(0x7f0000000040)={'syzkaller0\x00', 0x7101}) close(r1) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000500)) ioctl$SIOCSIFHWADDR(r1, 0x8914, &(0x7f0000002280)={'syzkaller0\x00', @multicast}) r3 = bpf$PROG_LOAD(0x5, &(0x7f00000000c0)={0x11, 0xb, &(0x7f00000006c0)=ANY=[@ANYBLOB="18000000000000000000000095980000180100002020702500000000002020207b1af8ff00000000bfa100000000000007010000f0ffffffb702000005000000b703000000000000850000007300000095"], &(0x7f0000000200)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x90) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000000)={&(0x7f0000000140)='kmem_cache_free\x00', r3}, 0x10) r4 = openat$tun(0xffffffffffffff9c, &(0x7f0000000240), 0x1c1842, 0x0) ioctl$TUNSETIFF(r4, 0x400454ca, &(0x7f0000000040)={'syzkaller0\x00', 0x7101}) write$cgroup_devices(r4, &(0x7f00000000c0)=ANY=[@ANYBLOB="1e0306003c5c9801288463ff0f46667004"], 0xffdd) 1.118439596s ago: executing program 2 (id=297): bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000180)={0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2}, 0x90) prctl$PR_SCHED_CORE(0x3e, 0x1, 0x0, 0x2, 0x0) r0 = getpid() process_vm_readv(r0, &(0x7f0000008400)=[{&(0x7f0000000300)=""/54, 0x7ffff000}, {&(0x7f0000006180)=""/152, 0x98}], 0x2, &(0x7f0000008640)=[{&(0x7f0000008480)=""/95, 0x7ffff000}], 0x286, 0x0) mkdirat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000040)='./cgroup.net/syz0\x00', 0x1ff) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000040), 0x200002, 0x0) r2 = openat$cgroup_devices(r1, &(0x7f0000000080)='devices.deny\x00', 0x2, 0x0) openat$cgroup_int(0xffffffffffffffff, 0x0, 0x2, 0x0) write$cgroup_devices(r2, &(0x7f00000003c0)={'b', ' *:* ', 'r\x00'}, 0x8) write$cgroup_devices(r2, &(0x7f0000000140)=ANY=[@ANYBLOB='b *:4\tw'], 0xa) r3 = openat$cgroup_devices(r1, &(0x7f0000000240)='devices.allow\x00', 0x2, 0x0) write$cgroup_devices(r3, &(0x7f0000000280)={'a', ' 
*:* ', 'wm\x00'}, 0x9) bpf$MAP_CREATE(0x0, &(0x7f0000000640)=@base={0x16, 0x0, 0x4, 0x1, 0x0, 0x1}, 0x48) r4 = bpf$PROG_LOAD(0x5, &(0x7f00000008c0)={0x6, 0xc, &(0x7f0000000440)=ANY=[], &(0x7f0000000240)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x90) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f00000004c0)={r4, 0xf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x50) 776.716512ms ago: executing program 4 (id=298): syz_mount_image$romfs(&(0x7f0000000040), &(0x7f0000000480)='./file0\x00', 0x0, &(0x7f0000000600)=ANY=[@ANYBLOB="00f3000000be5500200800000000c19e57fc847c52a19b0b247df0690ca7d757194d0335d8e8a065e069e1294e9f28bcee7085d4988309e751e0eec20f77d6c68ae8"], 0x1, 0x140, &(0x7f0000000340)="$eJzs2r9Kw1AUBvAzCEpHR6dAJQqa/1pXHcXN3SHU3DR4Y0oiSPsC4iQoXAdfQvAJfALJ6KZLB18icturplEhk9fh+y394LTpyblwp2PlWeqxwiLaHF4fTpbzLDV6wU6feSykmX0iMmQoq+rWpm9ePutlRfYPX9Br9W322V2PxcElS3jk624JAAAAAAAAAAAAAAAAAAAAAABaMjsqdAYivWAJj7xatRiNT0LOo7wgWtLUoV6moXa4iAl+J+cTzNU35Fh2ZUrEXi7r7vzvb1RYGwj+2pyvc5YOnWI0tpI0jKM4OvX9oOduue6270yf5TSfaN6rnqisHtzaOpn9dCQ7M2v7Zs9t9s3U+S+sJOLqvPlvX+ePgICA8BFaXp//3/T+mxDRYtB9lPef3c/4sSr+xTj1vj4A/OY9AAD//3cgO3s=") mkdirat(0xffffffffffffff9c, &(0x7f0000000040)='./file0\x00', 0x0) socket$inet(0x2, 0x0, 0x0) setsockopt$inet_sctp6_SCTP_RTOINFO(0xffffffffffffffff, 0x84, 0x0, &(0x7f0000000040)={0x0, 0x0, 0xd65}, 0x10) mkdirat(0xffffffffffffff9c, &(0x7f00000001c0)='./file0/file0\x00', 0x0) mount$tmpfs(0x0, &(0x7f0000000200)='./file0/file0\x00', 0x0, 0x0, 0x0) mkdirat(0xffffffffffffff9c, &(0x7f00000000c0)='./file0\x00', 0x0) mount$bind(0x0, &(0x7f0000000440)='./file0/../file0\x00', 0x0, 0x0, 0x0) mount$bind(0x0, &(0x7f0000000140)='./file0\x00', 0x0, 0x0, 0x0) mount$bind(0x0, &(0x7f0000000040)='./file0/file0\x00', 0x0, 0x0, 0x0) mount$bind(0x0, &(0x7f0000000180)='./file0\x00', 0x0, 0x0, 0x0) mount(0x0, &(0x7f0000000040)='./file0/../file0/../file0\x00', 0x0, 0x0, 0x0) mount(0x0, &(0x7f0000000040)='./file0\x00', 0x0, 0x0, 0x0) r0 = open_tree(0xffffffffffffff9c, &(0x7f0000000640)='\x00', 0x89901) move_mount(r0, &(0x7f0000000140)='.\x00', 0xffffffffffffff9c, &(0x7f0000000180)='./file0\x00', 0x0) 610.488149ms ago: executing program 4 (id=299): r0 = socket$inet6(0xa, 0x3, 0x3c) connect$inet6(r0, 0x0, 0x0) openat$sw_sync(0xffffffffffffff9c, &(0x7f0000000000), 0x0, 0x0) prlimit64(0x0, 0xe, 0x0, 0x0) sched_setscheduler(0x0, 0x2, 0x0) syz_usb_connect$printer(0x3, 0x36, &(0x7f0000000240)={{0x12, 0x1, 0x3bbc1747421f04ec, 0x0, 0x0, 0x0, 0x40, 0x525, 0xa4a8, 0x40, 0x1, 0x2, 0x3, 0x1, [{{0x9, 0x2, 0x24, 0x1, 0x1, 0x3, 0x60, 0x29, [{{0x9, 0x4, 0x0, 0x6, 0x2, 0x7, 0x1, 0x1, 0x1, "", {{{0x9, 0x5, 0x1, 0x2, 0x3ff, 0x4c, 0x1, 0x9}}, [{{0x9, 0x5, 0x82, 0x2, 0x8, 0x0, 0x40, 0x7}}]}}}]}}]}}, &(0x7f0000000ec0)={0xa, &(0x7f0000000300)={0xa, 0x6, 0x200, 0xa, 0x2, 0xc, 0x20, 0x7}, 0xc1, &(0x7f0000000480)={0x5, 0xf, 0xc1, 0x4, [@ptm_cap={0x3}, @ext_cap={0x7, 0x10, 0x2, 0x8, 0x8, 0x4, 0x1}, @generic={0x9a, 0x10, 0xa, "31260ba9f98f83387f513ef93b80c9c68cdf4bec9f2f340715e919e578dbe13f5a33bbb396d42e69bf7bb7e6fc49531dd13ae97a8f0b6a0358988aa4de875a4b609e33b46c08a13302fbc18a949972fc33425b15769f6c9bf499144c8ea5a9e3c70b71774c0007e756e77ef0da6f7007385cef0d4ca744cf3da3307798fef7c99877a87540cf4ba91235d6ce4648b87e4f7f31bcb0b2ec"}, @ssp_cap={0x18, 0x10, 0xa, 0x1, 0x3, 0x2, 0xf0f, 0x0, [0xc0, 0xff0000, 0xff3f00]}]}, 0x6, [{0x2, &(0x7f0000000380)=@string={0x2}}, {0x28, &(0x7f0000000400)=@string={0x28, 0x3, 
"eb38b7a16b713c2831991658340e255d404025c2acc7e6e38f23d02a5e8516211356d392d4dd"}}, {0x4, &(0x7f0000000580)=@lang_id={0x4, 0x3, 0x419}}, {0x0, 0x0}, {0xf5, &(0x7f0000000680)=ANY=[@ANYBLOB="f5b003c10c3b29b36b5bcc476fe992c5f033b2ea740561348f638bb51116d6982c01214eaf3b10f1ff840c9d3c565e15635566cf4d58a86d3e9812c266e68123a63ae77bbd728a9d8f90779850e9061ed8e1975d0fa31b1fd1469854569827dadcff389caa5a66c3cb1f9ff1483062f557c839d50eb523d4622aab1b6fa414eb925704dbe90100000000000000154679ef23d3beb05af4d0e78aabfb9b58f0385adc7f8e4e8d940a527fa1da0b9dea6a5d33b0582ab7ef2e5b6164c9b64032c1385719fd765ff3668c8b0eb4a839c5c257b730e0769daea10da9de4ee6bc2e65ef773adcb10334f095f13350b55958371a19"]}, {0x0, 0x0}]}) recvmmsg(0xffffffffffffffff, &(0x7f00000000c0), 0x0, 0x2, 0x0) shutdown(0xffffffffffffffff, 0x0) shutdown(0xffffffffffffffff, 0x0) r1 = epoll_create1(0x0) epoll_ctl$EPOLL_CTL_ADD(r1, 0x1, 0xffffffffffffffff, &(0x7f00000001c0)) socket$inet6_sctp(0xa, 0x5, 0x84) r2 = accept$packet(0xffffffffffffffff, &(0x7f0000000b80)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000f80)=0x14) accept$packet(r2, &(0x7f0000000fc0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 0x0) sendmmsg$inet(r0, &(0x7f0000000000)=[{{0x0, 0x0, &(0x7f0000000180), 0x0, 0x0, 0x0, 0x900}}], 0x1, 0x0) 0s ago: executing program 4 (id=300): syz_mount_image$hfsplus(&(0x7f00000000c0), &(0x7f0000000380)='\x13\x13w\xc5\xfc5\xd4\x14T\xd5\xd4\x1d)\xad\x1a`)Y\x81F\xe6\xbe\x16nA\xad\r\xbd@T\x03<\x9f3\xbb\xda\x82$\xa2\xf3\xd7r\xe7cnH\xb3<\xbfp\x83r\xe8\xf1\xb9\x93>\xc5\x12wC\xbe\"\x06 \x9e\xf0-\xf9\xcb\xf2\xf6\xe8\x80\xd38/\x00', 0x0, &(0x7f0000000040)={[{@part={'part', 0x3d, 0x9}}, {@nodecompose}]}, 0x3, 0x6b4, &(0x7f00000015c0)="$eJzs3U9sHFcdB/DvbJx1NkiJ2yZpQEi1GqmCRiR2ViVBQmpACHKIUASXXq3ESaw4aeW4KK0Q2QAFiVNPqAcORSgcekIIIZUTopyRkLhwyj1SbxxyAFzN7Ox6Ym8cO3+8bvv5SON5b9+8937z6+zMzmyjDfC5dea17O6lyJmjZ2+U9Tu3u4t3bnevDspJJpO0kon+KkUnKT5KTqe/5Ivli/VwxYPmeeXuh8XEex90+7WJeqm2b23Ub52RW/aSPcPKriTT/eJ/Nz1s083VYjnO+dXxHlExjLtM2JFB4mDcVtbprTa2Htp98+9bYMe62b9urjOV7E3/6lp+Dkh9dnj4mWH8Njw39bYvDgAAAHhaRt7LN+2/l3u5kX3bEw4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8NhT93wws6qU1KE+nGPz+f7vxm/rtMYf7mN65WK1+sH/cgQAAAAAAAADAY3nhXu7lRvYN6itF9Z3/i1XlQPX3C3kz1zOfpRzLjcxlOctZymySqcZA7Rtzy8tLs+t7/jplz5WVlZt1zxMje55oRrU7vbWBjvo/DdZtBAAAAAAAAACfWz/NmdXv/wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYCcokl39VbUcGJSn0ppIsidJu5gebt4ea7CPrjUo/HW8cQAAAMB26NTrfcX/+4WVorrnP1Td9+/Jm7mW5SxkOYuZz4XqWUD/rr/1z1538c7t7tVyuW/MS/u3Hke7viffVdVGzTxTbXFw2ONMvpsf5mimcy5LWciPMpflzGc636lKcykyVT+9mLpzu5NBrOvizen7aufWxvZCo1zGd7iKpJOLWahiO5bz7UHog+cKhxuz/bmdrJnx1sdlZK/WNpmjC/W63KN36/XOMFXt+e5hRmbq3JfZeKaZ9/W5/9bHjzXTbFrDZ1AHVmcpq2tneqSc763XZa5/MSLn7+7ZUvgb2eKjtPsz0ftVWRscfYc2znny1X/97dzl1rUrly9eP7pzDqNHtPaY6DYy8fymMrFYZqL3GJl4ckfBY2nX2eifRbd2tnyx6rsvC/l+Xs+FzOdkZjKbU5nJN3Ii3Zxo5PXgxnmt3mutrb3XjnylLpTXpF82rk3bZvJBDWVen2nktXmmm6ramq+sZunZTWSpaKefpWLN6eXfI0OZ+FJdKOf4WfNJ9titzcRsIxPPbZyJ3/5vJcn1xWtXli7PvbHJ+V6q12XS3rk/eb97Iju0dfXulsfLs+V/rPQvG82jo2x7btC2Jl/t+huXiXqwYdv39leDHhy2PeydWo506NaIkeq250fNkm7VdrjRdt+nnLyexeGnEAB2sL0v72137nb+0Xm/8/PO5c7ZPd+ePDX55XZ2/33iL7v+0Pp965vFy3k/P8m+cUcKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACfBdffevvK3OLi/NIOLKT1hAe8NbJpkIr+K+2dse+f1sLkRkfUH5Ns0L09jpg7SXZE6jKxDXNNZkTT2eErnaQ1jCfJlR3yA3fA03B8+eobx6+/9fbXFq7OXZq/NH/txKmTr57sfn325vGLC4vzM/2/444SeBpWPwaMOxIAAAAAAAAAAABgs7bjnzeMmLbojWFfAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgE+nM69ldy9FZmeOzZT1O7e7i+UyKK9uOZGklaT4cVJ8lJxOf8lUY7jiQfO8cvfD37z03gfd1bEmBtu31vT7039WVra4F716yXSSXfX64SY3Nd75xni9LQbWVwz3sEzYkUHiYNw+CQAA//9+ggdV") r0 = socket$kcm(0x10, 0x2, 0x4) sendmsg$inet(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)=[{&(0x7f0000000080)="5c00000015006b03000000d86e6c1d2002117ea6ea9228016507001b32d00af32c6e020075f800250002000f00000017d34460bc24eab556a713251e6182949a36c23d3b48dfd8cdbf3367b4fa51f60a64c9f4d49304000000000000", 0x5c}], 0x1, 0x0, 0x0, 0x1f00c00e}, 0x0) recvmsg(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000001800)=[{&(0x7f00000004c0)=""/180, 0xb4}], 0x1}, 0x0) r1 = openat$ptmx(0xffffffffffffff9c, &(0x7f0000000040), 0x41, 0x0) write$binfmt_aout(r1, &(0x7f00000000c0)=ANY=[], 0xff2e) ioctl$TCSETS(r1, 0x40045431, &(0x7f0000000dc0)={0x0, 0x0, 0x0, 0x0, 0x0, "0062ba7d82000000000000000000f7ffffff00"}) r2 = syz_open_pts(r1, 0x0) r3 = dup3(r2, r1, 0x0) ioctl$TIOCSETD(r1, 0x5423, &(0x7f0000000080)=0x7) ioctl$TIOCSTI(r3, 0x5412, &(0x7f0000000000)=0xdd) sendmmsg$alg(0xffffffffffffffff, 0x0, 0x0, 0x0) socket$alg(0x26, 0x5, 0x0) bind$alg(0xffffffffffffffff, 0x0, 0x0) kernel console output (not intermixed with test programs): T43] veth1_vlan: left promiscuous mode [ 118.101859][ T43] veth0_vlan: left promiscuous mode [ 119.134908][ T7369] loop4: detected capacity change from 0 to 262144 [ 119.141002][ T7369] BTRFS: device fsid 7e32c2af-f87a-45a1-bcba-64dea7c56a53 devid 1 transid 8 /dev/loop4 (7:4) scanned by syz.4.139 (7369) [ 119.154538][ T7369] BTRFS info (device loop4): first mount of filesystem 7e32c2af-f87a-45a1-bcba-64dea7c56a53 [ 119.157463][ T7369] BTRFS info (device loop4): using xxhash64 (xxhash64-generic) checksum algorithm [ 119.159814][ T7369] BTRFS info (device loop4): using free-space-tree [ 119.169089][ T660] BTRFS warning (device loop4): checksum verify failed on logical 22036480 mirror 1 wanted 0x23e101be1e001a29 found 0x98e2f59226e63d74 level 0 [ 119.180455][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 22036480 (dev /dev/loop4 sector 43040) [ 119.185008][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 22040576 (dev /dev/loop4 sector 43048) [ 119.188552][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 22044672 (dev /dev/loop4 sector 43056) [ 119.191644][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 22048768 (dev /dev/loop4 sector 43064) [ 119.195504][ T303] BTRFS warning (device loop4): checksum verify failed on logical 30670848 mirror 1 wanted 0xe9f08ec94c425425 found 0x1a4a9216e61c07c8 level 0 [ 119.201713][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 30670848 (dev /dev/loop4 sector 76288) [ 119.204902][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 30674944 (dev /dev/loop4 sector 76296) [ 119.215702][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 30679040 (dev /dev/loop4 sector 76304) [ 119.221963][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 30683136 (dev /dev/loop4 sector 76312) [ 119.233094][ T229] BTRFS warning (device loop4): checksum verify failed on logical 30457856 mirror 1 wanted 0x402e75f1de9ccfe6 found 0x64dad595b87aeca8 level 0 [ 119.238412][ T7369] BTRFS info (device loop4): read error corrected: ino 
0 off 30457856 (dev /dev/loop4 sector 75872)
[ 119.248971][ T7369] BTRFS info (device loop4): read error corrected: ino 0 off 30461952 (dev /dev/loop4 sector 75880)
[ 119.271471][ T7369] fs-verity: sha512 using implementation "sha512-arm64"
[ 119.283595][ T7369] BTRFS info (device loop4): setting compat-ro feature flag for VERITY (0x4)
[ 119.321420][ T7054] BTRFS info (device loop4): last unmount of filesystem 7e32c2af-f87a-45a1-bcba-64dea7c56a53
[ 119.677085][ T7391] loop4: detected capacity change from 0 to 512
[ 119.682308][ T7391] EXT4-fs (loop4): encrypted files will use data=ordered instead of data journaling mode
[ 119.687651][ T7391] EXT4-fs (loop4): 1 truncate cleaned up
[ 119.690991][ T7391] EXT4-fs (loop4): mounted filesystem 00000000-0000-0000-0000-000000000000 r/w without journal. Quota mode: none.
[ 120.017028][ T6405] Bluetooth: hci4: command tx timeout
[ 120.121186][ T43] team0 (unregistering): Port device team_slave_1 removed
[ 120.353904][ T43] team0 (unregistering): Port device team_slave_0 removed
[ 124.751100][ T43] team0 (unregistering): Port device team_slave_1 removed
[ 124.948776][ T43] team0 (unregistering): Port device team_slave_0 removed
[ 125.938236][ T2335] ieee802154 phy0 wpan0: encryption failed: -22
[ 125.940020][ T2335] ieee802154 phy1 wpan1: encryption failed: -22
[ 127.244483][ T7303] bridge0: port 1(bridge_slave_0) entered blocking state
[ 127.246539][ T7303] bridge0: port 1(bridge_slave_0) entered disabled state
[ 127.248746][ T7303] bridge_slave_0: entered allmulticast mode
[ 127.250832][ T7303] bridge_slave_0: entered promiscuous mode
[ 127.255258][ T7303] bridge0: port 2(bridge_slave_1) entered blocking state
[ 127.257283][ T7303] bridge0: port 2(bridge_slave_1) entered disabled state
[ 127.259186][ T7303] bridge_slave_1: entered allmulticast mode
[ 127.261331][ T7303] bridge_slave_1: entered promiscuous mode
[ 127.275685][ T7303] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link
[ 127.294301][ T7303] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link
[ 127.346131][ T6601] EXT4-fs (loop0): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09.
[ 127.392369][ T7131] 8021q: adding VLAN 0 to HW filter on device team0
[ 127.459087][ T44] bridge0: port 1(bridge_slave_0) entered blocking state
[ 127.461242][ T44] bridge0: port 1(bridge_slave_0) entered forwarding state
[ 127.472015][ T53] Bluetooth: hci0: unexpected cc 0x0c03 length: 249 > 1
[ 127.483815][ T53] Bluetooth: hci0: unexpected cc 0x1003 length: 249 > 9
[ 127.495628][ T44] bridge0: port 2(bridge_slave_1) entered blocking state
[ 127.497699][ T44] bridge0: port 2(bridge_slave_1) entered forwarding state
[ 127.500042][ T53] Bluetooth: hci0: unexpected cc 0x1001 length: 249 > 9
[ 127.504327][ T53] Bluetooth: hci0: unexpected cc 0x0c23 length: 249 > 4
[ 127.509740][ T53] Bluetooth: hci0: unexpected cc 0x0c25 length: 249 > 3
[ 127.513218][ T53] Bluetooth: hci0: unexpected cc 0x0c38 length: 249 > 2
[ 127.521420][ T7303] team0: Port device team_slave_0 added
[ 127.572359][ T7211] netdevsim netdevsim2 netdevsim0: renamed from eth0
[ 127.581504][ T7054] EXT4-fs (loop4): unmounting filesystem 00000000-0000-0000-0000-000000000000.
[ 127.587489][ T7303] team0: Port device team_slave_1 added
[ 127.594545][ T7131] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network
[ 127.615759][ T7131] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network
[ 127.659882][ T7211] netdevsim netdevsim2 netdevsim1: renamed from eth1
[ 127.668382][ T7211] netdevsim netdevsim2 netdevsim2: renamed from eth2
[ 127.763980][ T7211] netdevsim netdevsim2 netdevsim3: renamed from eth3
[ 127.800426][ T7303] batman_adv: batadv0: Adding interface: batadv_slave_0
[ 127.802375][ T7303] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem.
[ 127.818022][ T7303] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active
[ 127.852123][ T7303] batman_adv: batadv0: Adding interface: batadv_slave_1
[ 127.854011][ T7303] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem.
[ 127.872787][ T6405] Bluetooth: hci2: unexpected cc 0x0c03 length: 249 > 1
[ 127.876471][ T7303] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active
[ 127.878040][ T6405] Bluetooth: hci2: unexpected cc 0x1003 length: 249 > 9
[ 127.887598][ T6405] Bluetooth: hci2: unexpected cc 0x1001 length: 249 > 9
[ 127.892502][ T6405] Bluetooth: hci2: unexpected cc 0x0c23 length: 249 > 4
[ 127.894927][ T6405] Bluetooth: hci2: unexpected cc 0x0c25 length: 249 > 3
[ 127.904141][ T6405] Bluetooth: hci2: unexpected cc 0x0c38 length: 249 > 2
[ 127.932772][ T7131] 8021q: adding VLAN 0 to HW filter on device batadv0
[ 128.010290][ T7394] chnl_net:caif_netlink_parms(): no params data found
[ 128.079134][ T7303] hsr_slave_0: entered promiscuous mode
[ 128.127514][ T7303] hsr_slave_1: entered promiscuous mode
[ 128.159894][ T7303] debugfs: Directory 'hsr0' with parent 'hsr' already present!
[ 128.161932][ T7303] Cannot create hsr debugfs directory
[ 128.172682][ T7131] veth0_vlan: entered promiscuous mode
[ 128.182066][ T7211] 8021q: adding VLAN 0 to HW filter on device bond0
[ 128.220257][ T7131] veth1_vlan: entered promiscuous mode
[ 128.225057][ T7211] 8021q: adding VLAN 0 to HW filter on device team0
[ 128.304176][ T7394] bridge0: port 1(bridge_slave_0) entered blocking state
[ 128.306094][ T7394] bridge0: port 1(bridge_slave_0) entered disabled state
[ 128.308557][ T7394] bridge_slave_0: entered allmulticast mode
[ 128.310961][ T7394] bridge_slave_0: entered promiscuous mode
[ 128.314876][ T7394] bridge0: port 2(bridge_slave_1) entered blocking state
[ 128.318308][ T7394] bridge0: port 2(bridge_slave_1) entered disabled state
[ 128.320250][ T7394] bridge_slave_1: entered allmulticast mode
[ 128.322669][ T7394] bridge_slave_1: entered promiscuous mode
[ 128.384871][ T7131] veth0_macvtap: entered promiscuous mode
[ 128.389993][ T7131] veth1_macvtap: entered promiscuous mode
[ 128.399505][ T13] bridge0: port 1(bridge_slave_0) entered blocking state
[ 128.401417][ T13] bridge0: port 1(bridge_slave_0) entered forwarding state
[ 128.410116][ T7394] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link
[ 128.430347][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0
[ 128.433253][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.436099][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0
[ 128.439548][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.442145][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0
[ 128.444862][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.448770][ T7131] batman_adv: batadv0: Interface activated: batadv_slave_0
[ 128.460147][ T13] bridge0: port 2(bridge_slave_1) entered blocking state
[ 128.462063][ T13] bridge0: port 2(bridge_slave_1) entered forwarding state
[ 128.468462][ T7394] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link
[ 128.491722][ T7394] team0: Port device team_slave_0 added
[ 128.495146][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1
[ 128.507012][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.509949][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1
[ 128.513390][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.516025][ T7131] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1
[ 128.519211][ T7131] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 128.523234][ T7131] batman_adv: batadv0: Interface activated: batadv_slave_1
[ 128.534137][ T7394] team0: Port device team_slave_1 added
[ 128.554014][ T7131] netdevsim netdevsim1 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0
[ 128.556325][ T7131] netdevsim netdevsim1 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0
[ 128.559206][ T7131] netdevsim netdevsim1 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0
[ 128.561579][ T7131] netdevsim netdevsim1 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0
[ 128.612755][ T7394] batman_adv: batadv0: Adding interface: batadv_slave_0
[ 128.614600][ T7394] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem.
[ 128.624155][ T7394] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active
[ 128.629311][ T7394] batman_adv: batadv0: Adding interface: batadv_slave_1
[ 128.631280][ T7394] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem.
[ 128.641059][ T7394] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active
[ 129.273522][ T7303] netdevsim netdevsim3 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0
[ 129.616854][ T53] Bluetooth: hci0: command tx timeout
[ 129.660685][ T43] netdevsim netdevsim0 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0
[ 129.759646][ T7303] netdevsim netdevsim3 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0
[ 129.808720][ T43] netdevsim netdevsim0 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0
[ 129.858425][ T7394] hsr_slave_0: entered promiscuous mode
[ 129.907143][ T7394] hsr_slave_1: entered promiscuous mode
[ 129.936926][ T53] Bluetooth: hci2: command tx timeout
[ 129.946937][ T7394] debugfs: Directory 'hsr0' with parent 'hsr' already present!
[ 129.948956][ T7394] Cannot create hsr debugfs directory [ 130.010694][ T7303] netdevsim netdevsim3 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.019296][ T7415] chnl_net:caif_netlink_parms(): no params data found [ 130.068722][ T43] netdevsim netdevsim0 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.168829][ T7303] netdevsim netdevsim3 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.176626][ T662] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 130.183192][ T662] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 130.242870][ T43] netdevsim netdevsim0 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.287946][ T660] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 130.288691][ T7415] bridge0: port 1(bridge_slave_0) entered blocking state [ 130.291982][ T7415] bridge0: port 1(bridge_slave_0) entered disabled state [ 130.294046][ T7415] bridge_slave_0: entered allmulticast mode [ 130.296388][ T660] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 130.296401][ T7415] bridge_slave_0: entered promiscuous mode [ 130.299016][ T7415] bridge0: port 2(bridge_slave_1) entered blocking state [ 130.302038][ T7415] bridge0: port 2(bridge_slave_1) entered disabled state [ 130.304095][ T7415] bridge_slave_1: entered allmulticast mode [ 130.306284][ T7415] bridge_slave_1: entered promiscuous mode [ 130.341786][ T7211] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 130.368988][ T7415] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 130.376153][ T7415] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 130.446398][ T7415] team0: Port device team_slave_0 added [ 130.485741][ T7415] team0: Port device team_slave_1 added [ 130.514461][ T7211] veth0_vlan: entered promiscuous mode [ 130.529115][ T7415] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 130.531044][ T7415] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 130.538857][ T7415] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 130.552003][ T7303] netdevsim netdevsim3 netdevsim0: renamed from eth0 [ 130.579652][ T7211] veth1_vlan: entered promiscuous mode [ 130.588515][ T7415] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 130.590426][ T7415] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. 
[ 130.599626][ T7415] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 130.681390][ T43] netdevsim netdevsim4 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.705639][ T7303] netdevsim netdevsim3 netdevsim1: renamed from eth1 [ 130.711246][ T7303] netdevsim netdevsim3 netdevsim2: renamed from eth2 [ 130.720953][ T7211] veth0_macvtap: entered promiscuous mode [ 130.724362][ T7211] veth1_macvtap: entered promiscuous mode [ 130.746791][ T7303] netdevsim netdevsim3 netdevsim3: renamed from eth3 [ 130.838473][ T43] netdevsim netdevsim4 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 130.927100][ T7415] hsr_slave_0: entered promiscuous mode [ 130.977165][ T7415] hsr_slave_1: entered promiscuous mode [ 131.106929][ T7415] debugfs: Directory 'hsr0' with parent 'hsr' already present! [ 131.109506][ T7415] Cannot create hsr debugfs directory [ 131.603685][ T43] netdevsim netdevsim4 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 131.698164][ T53] Bluetooth: hci0: command tx timeout [ 131.968403][ T43] netdevsim netdevsim4 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 132.016873][ T53] Bluetooth: hci2: command tx timeout [ 132.082054][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 132.086836][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.089526][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 132.092404][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.094988][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 132.107264][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.110120][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 132.120289][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.123974][ T7211] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 132.170910][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 132.173871][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.176405][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 132.186994][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.189662][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 132.192455][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 132.195007][ T7211] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 132.204499][ T7211] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 132.208420][ T7211] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 132.247602][ T7211] netdevsim netdevsim2 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 132.250784][ T7211] netdevsim netdevsim2 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 132.253272][ T7211] netdevsim netdevsim2 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 132.255828][ T7211] netdevsim netdevsim2 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 132.286511][ T7303] 8021q: adding VLAN 0 to HW filter on device bond0 [ 132.359791][ T43] bridge_slave_1: left allmulticast mode [ 132.361376][ T43] bridge_slave_1: left promiscuous mode [ 132.362924][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 132.366435][ T43] bridge_slave_0: left allmulticast mode [ 132.368304][ T43] bridge_slave_0: left promiscuous mode [ 132.369960][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 132.376537][ T43] bridge_slave_1: left allmulticast mode [ 132.378437][ T43] bridge_slave_1: left promiscuous mode [ 132.380035][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 132.383253][ T43] bridge_slave_0: left allmulticast mode [ 132.384853][ T43] bridge_slave_0: left promiscuous mode [ 132.386604][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 132.391630][ T43] bridge_slave_1: left allmulticast mode [ 132.393112][ T43] bridge_slave_1: left promiscuous mode [ 132.394566][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 132.404256][ T43] bridge_slave_0: left allmulticast mode [ 132.405806][ T43] bridge_slave_0: left promiscuous mode [ 132.409209][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 132.799097][ T7482] binder: 7481:7482 tried to acquire reference to desc 0, got 1 instead [ 132.802704][ T7482] binder: 7481:7482 got transaction to context manager from process owning it [ 132.805144][ T7482] binder: 7481:7482 transaction call to 7481:0 failed 19/29201/-22, size 0-0 line 3133 [ 132.808489][ T7482] binder: 7481:7482 ioctl c0306201 200002c0 returned -14 [ 132.811608][ T6448] binder: release 7481:7482 transaction 18 out, still active [ 132.813951][ T6448] binder: undelivered TRANSACTION_ERROR: 29201 [ 132.823035][ T6448] binder: send failed reply for transaction 18, target dead [ 132.850649][ T7484] loop1: detected capacity change from 0 to 128 [ 132.855096][ T7484] EXT4-fs (loop1): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 ro without journal. Quota mode: none. 
[ 133.777084][ T53] Bluetooth: hci0: command tx timeout [ 134.107352][ T53] Bluetooth: hci2: command tx timeout [ 135.629976][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 135.670216][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 135.709823][ T43] bond0 (unregistering): Released all slaves [ 135.857344][ T53] Bluetooth: hci0: command tx timeout [ 136.177402][ T53] Bluetooth: hci2: command tx timeout [ 136.410313][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 136.449550][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 136.499543][ T43] bond0 (unregistering): Released all slaves [ 137.239442][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 137.279537][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 137.319585][ T43] bond0 (unregistering): Released all slaves [ 137.346145][ T7394] netdevsim netdevsim0 netdevsim0: renamed from eth0 [ 137.361825][ T7131] EXT4-fs (loop1): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 137.453102][ T7394] netdevsim netdevsim0 netdevsim1: renamed from eth1 [ 137.566268][ T7303] 8021q: adding VLAN 0 to HW filter on device team0 [ 137.584693][ T7394] netdevsim netdevsim0 netdevsim2: renamed from eth2 [ 137.603242][ T7394] netdevsim netdevsim0 netdevsim3: renamed from eth3 [ 137.629716][ T44] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 137.631792][ T44] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 137.663465][ T44] bridge0: port 1(bridge_slave_0) entered blocking state [ 137.665231][ T44] bridge0: port 1(bridge_slave_0) entered forwarding state [ 137.855896][ T11] bridge0: port 2(bridge_slave_1) entered blocking state [ 137.858073][ T11] bridge0: port 2(bridge_slave_1) entered forwarding state [ 138.109227][ T662] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 138.112264][ T662] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 138.375064][ T7541] loop1: detected capacity change from 0 to 128 [ 138.422326][ T7541] EXT4-fs (loop1): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 138.865016][ T7131] EXT4-fs (loop1): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 138.879748][ T7394] 8021q: adding VLAN 0 to HW filter on device bond0 [ 139.138815][ T7303] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 139.172068][ T7394] 8021q: adding VLAN 0 to HW filter on device team0 [ 139.295331][ T7394] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 139.299064][ T7394] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 139.322979][ T662] bridge0: port 1(bridge_slave_0) entered blocking state [ 139.325080][ T662] bridge0: port 1(bridge_slave_0) entered forwarding state [ 139.329817][ T662] bridge0: port 2(bridge_slave_1) entered blocking state [ 139.331932][ T662] bridge0: port 2(bridge_slave_1) entered forwarding state [ 139.476232][ T7561] netlink: 16 bytes leftover after parsing attributes in process `syz.1.156'. 
[ 140.224441][ T7415] netdevsim netdevsim4 netdevsim0: renamed from eth0 [ 140.235522][ T7415] netdevsim netdevsim4 netdevsim1: renamed from eth1 [ 140.243325][ T7415] netdevsim netdevsim4 netdevsim2: renamed from eth2 [ 140.257973][ T7415] netdevsim netdevsim4 netdevsim3: renamed from eth3 [ 140.326311][ T7303] veth0_vlan: entered promiscuous mode [ 140.340761][ T7394] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 140.358175][ T7303] veth1_vlan: entered promiscuous mode [ 140.824766][ T7579] loop1: detected capacity change from 0 to 128 [ 140.887607][ T7579] EXT4-fs (loop1): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 141.241522][ T7394] veth0_vlan: entered promiscuous mode [ 141.339594][ T7131] EXT4-fs (loop1): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 141.519937][ T7394] veth1_vlan: entered promiscuous mode [ 141.586390][ T7303] veth0_macvtap: entered promiscuous mode [ 141.593245][ T7303] veth1_macvtap: entered promiscuous mode [ 141.605854][ T43] hsr_slave_0: left promiscuous mode [ 141.627764][ T43] hsr_slave_1: left promiscuous mode [ 141.717054][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 141.719681][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 141.722296][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 141.724456][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 141.732219][ T43] hsr_slave_0: left promiscuous mode [ 141.778795][ T43] hsr_slave_1: left promiscuous mode [ 141.867001][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 141.869119][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 141.877772][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 141.880055][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 141.892453][ T43] hsr_slave_0: left promiscuous mode [ 141.927324][ T43] hsr_slave_1: left promiscuous mode [ 142.016987][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 142.019163][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 142.022062][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 142.024292][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 142.057708][ T43] veth1_macvtap: left promiscuous mode [ 142.059325][ T43] veth0_macvtap: left promiscuous mode [ 142.060887][ T43] veth1_vlan: left promiscuous mode [ 142.062444][ T43] veth0_vlan: left promiscuous mode [ 142.064781][ T43] veth1_macvtap: left promiscuous mode [ 142.066177][ T43] veth0_macvtap: left promiscuous mode [ 142.069271][ T43] veth1_vlan: left promiscuous mode [ 142.070686][ T43] veth0_vlan: left promiscuous mode [ 142.072923][ T43] veth1_macvtap: left promiscuous mode [ 142.074499][ T43] veth0_macvtap: left promiscuous mode [ 142.078641][ T43] veth1_vlan: left promiscuous mode [ 142.081210][ T43] veth0_vlan: left promiscuous mode [ 142.416895][ T6405] Bluetooth: hci1: command 0x0406 tx timeout [ 144.110893][ T43] team0 (unregistering): Port device team_slave_1 removed [ 144.328697][ T43] team0 (unregistering): Port device team_slave_0 removed [ 148.619603][ T43] team0 (unregistering): Port device team_slave_1 removed [ 148.860187][ T43] team0 (unregistering): Port device team_slave_0 removed [ 152.514948][ T6405] Bluetooth: hci5: unexpected cc 0x0c03 length: 249 > 1 [ 152.527405][ T6411] Bluetooth: hci5: unexpected cc 0x1003 length: 249 > 9 [ 152.530124][ T6411] Bluetooth: hci5: unexpected cc 0x1001 length: 249 > 9 [ 
152.532583][ T6411] Bluetooth: hci5: unexpected cc 0x0c23 length: 249 > 4 [ 152.534814][ T6411] Bluetooth: hci5: unexpected cc 0x0c25 length: 249 > 3 [ 152.537713][ T6411] Bluetooth: hci5: unexpected cc 0x0c38 length: 249 > 2 [ 152.548977][ T53] Bluetooth: hci6: unexpected cc 0x0c03 length: 249 > 1 [ 152.561472][ T53] Bluetooth: hci6: unexpected cc 0x1003 length: 249 > 9 [ 152.564437][ T6407] Bluetooth: hci6: unexpected cc 0x1001 length: 249 > 9 [ 152.570310][ T6407] Bluetooth: hci6: unexpected cc 0x0c23 length: 249 > 4 [ 152.573374][ T6407] Bluetooth: hci6: unexpected cc 0x0c25 length: 249 > 3 [ 152.575500][ T6407] Bluetooth: hci6: unexpected cc 0x0c38 length: 249 > 2 [ 153.121212][ T43] team0 (unregistering): Port device team_slave_1 removed [ 153.300678][ T43] team0 (unregistering): Port device team_slave_0 removed [ 154.576921][ T6407] Bluetooth: hci5: command tx timeout [ 154.658114][ T6407] Bluetooth: hci6: command tx timeout [ 155.687876][ T7394] veth0_macvtap: entered promiscuous mode [ 155.710845][ T7303] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 155.713813][ T7303] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.716421][ T7303] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 155.726385][ T7303] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.730528][ T7303] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 155.735602][ T7303] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 155.745399][ T7303] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.749443][ T7303] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 155.752500][ T7303] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.756218][ T7303] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 155.799604][ T7394] veth1_macvtap: entered promiscuous mode [ 155.803778][ T7303] netdevsim netdevsim3 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.806271][ T7303] netdevsim netdevsim3 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.814009][ T7303] netdevsim netdevsim3 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.816506][ T7303] netdevsim netdevsim3 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.845411][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 155.848634][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.851244][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 155.854042][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.857900][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 155.860706][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.871688][ T7394] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 155.945094][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 155.956489][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 155.965882][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 155.970214][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.973123][ T7394] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 155.976048][ T7394] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 155.980560][ T7394] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 155.984552][ T7394] netdevsim netdevsim0 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.988842][ T7394] netdevsim netdevsim0 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.991364][ T7394] netdevsim netdevsim0 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 155.993721][ T7394] netdevsim netdevsim0 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 156.049076][ T7415] 8021q: adding VLAN 0 to HW filter on device bond0 [ 156.194092][ T7415] 8021q: adding VLAN 0 to HW filter on device team0 [ 156.208208][ T44] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 156.210347][ T44] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 156.279583][ T662] bridge0: port 1(bridge_slave_0) entered blocking state [ 156.281537][ T662] bridge0: port 1(bridge_slave_0) entered forwarding state [ 156.316187][ T44] bridge0: port 2(bridge_slave_1) entered blocking state [ 156.318185][ T44] bridge0: port 2(bridge_slave_1) entered forwarding state [ 156.322467][ T7597] chnl_net:caif_netlink_parms(): no params data found [ 156.344258][ T229] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 156.347036][ T229] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 156.398282][ T364] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 156.400478][ T364] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 156.422865][ T7597] bridge0: port 1(bridge_slave_0) entered blocking state [ 156.424763][ T7597] bridge0: port 1(bridge_slave_0) entered disabled state [ 156.442799][ T7597] bridge_slave_0: entered allmulticast mode [ 156.445942][ T7597] bridge_slave_0: entered promiscuous mode [ 156.460238][ T7597] bridge0: port 2(bridge_slave_1) entered blocking state [ 156.462345][ T7597] bridge0: port 2(bridge_slave_1) entered disabled state [ 156.464696][ T7597] bridge_slave_1: entered allmulticast mode [ 156.476060][ T7597] bridge_slave_1: entered promiscuous mode [ 156.488609][ T660] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 156.491103][ T660] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 156.518722][ T7599] chnl_net:caif_netlink_parms(): no params data found [ 156.533888][ T7597] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 156.545682][ T7415] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 156.631609][ T7597] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 156.687613][ T6407] Bluetooth: hci5: command tx timeout [ 156.723594][ T7597] team0: Port device team_slave_0 added [ 156.737649][ T6407] Bluetooth: hci6: command tx timeout [ 156.805512][ T7597] team0: Port device team_slave_1 added [ 156.867745][ T7599] bridge0: port 1(bridge_slave_0) entered blocking state [ 156.880342][ T7599] bridge0: port 1(bridge_slave_0) entered disabled state [ 156.889901][ T7599] bridge_slave_0: entered allmulticast mode [ 156.904641][ T7599] bridge_slave_0: entered promiscuous mode [ 157.018992][ T7597] batman_adv: 
batadv0: Adding interface: batadv_slave_0 [ 157.025162][ T7597] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 157.041882][ T7597] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 157.046365][ T7597] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 157.051378][ T7597] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 157.064558][ T7597] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 157.070966][ T7599] bridge0: port 2(bridge_slave_1) entered blocking state [ 157.072927][ T7599] bridge0: port 2(bridge_slave_1) entered disabled state [ 157.074952][ T7599] bridge_slave_1: entered allmulticast mode [ 157.077363][ T7599] bridge_slave_1: entered promiscuous mode [ 157.111104][ T7599] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 157.532942][ T7599] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 157.808579][ T7597] hsr_slave_0: entered promiscuous mode [ 157.887801][ T7597] hsr_slave_1: entered promiscuous mode [ 157.998983][ T7599] team0: Port device team_slave_0 added [ 158.127568][ T7599] team0: Port device team_slave_1 added [ 158.397146][ T7648] loop0: detected capacity change from 0 to 128 [ 158.475076][ T7648] EXT4-fs (loop0): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 158.746765][ T6407] Bluetooth: hci5: command tx timeout [ 158.786903][ T7599] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 158.788758][ T7599] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 158.795842][ T7599] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 158.800299][ T7599] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 158.802273][ T7599] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 158.809463][ T7599] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 158.826983][ T6407] Bluetooth: hci6: command tx timeout [ 158.884070][ T7394] EXT4-fs (loop0): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 158.948802][ T7599] hsr_slave_0: entered promiscuous mode [ 158.997238][ T7599] hsr_slave_1: entered promiscuous mode [ 159.067199][ T7599] debugfs: Directory 'hsr0' with parent 'hsr' already present! [ 159.069502][ T7599] Cannot create hsr debugfs directory [ 159.357507][ T7655] loop0: detected capacity change from 0 to 128 [ 159.455935][ T7655] EXT4-fs (loop0): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. 
[ 159.739829][ T7597] netdevsim netdevsim2 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 159.751864][ T7415] veth0_vlan: entered promiscuous mode [ 159.781698][ T7415] veth1_vlan: entered promiscuous mode [ 159.843154][ T7394] EXT4-fs (loop0): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 159.878614][ T7597] netdevsim netdevsim2 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 159.903142][ T7658] binder: 7657:7658 tried to acquire reference to desc 0, got 1 instead [ 159.910465][ T7658] binder: 7657:7658 got transaction to context manager from process owning it [ 159.912890][ T7658] binder: 7657:7658 transaction call to 7657:0 failed 27/29201/-22, size 0-0 line 3133 [ 159.915444][ T7658] binder: 7657:7658 ioctl c0306201 200002c0 returned -14 [ 159.920546][ T8] binder: release 7657:7658 transaction 24 out, still active [ 159.922611][ T8] binder: undelivered TRANSACTION_COMPLETE [ 159.924192][ T8] binder: undelivered TRANSACTION_ERROR: 29201 [ 159.937247][ T8] binder: send failed reply for transaction 24, target dead [ 159.966494][ T7660] loop0: detected capacity change from 0 to 1024 [ 159.980974][ T7660] hfsplus: failed to load extents file [ 159.988724][ T7660] binder: 7659:7660 ERROR: BC_REGISTER_LOOPER called without request [ 159.991459][ T7660] binder: 7659:7660 got reply transaction with no transaction stack [ 159.993684][ T7660] binder: 7659:7660 transaction reply to 0:0 failed 28/29201/-71, size 0-0 line 3045 [ 160.158518][ T7597] netdevsim netdevsim2 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 160.204553][ T7664] Injecting memory failure for pfn 0x140809 at process virtual address 0x20009000 [ 160.212624][ T7664] Memory failure: 0x140809: recovery action for dirty LRU page: Recovered [ 160.469392][ T7597] netdevsim netdevsim2 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 160.530091][ T7415] veth0_macvtap: entered promiscuous mode [ 160.534735][ T7415] veth1_macvtap: entered promiscuous mode [ 160.542463][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 160.545406][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.548262][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 160.551082][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.553728][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 160.556461][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.559953][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 160.562742][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.566421][ T7415] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 160.574186][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 160.577419][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 160.579492][ T10] usb 1-1: new high-speed USB device number 3 using dummy_hcd [ 160.580267][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 160.580284][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.580303][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 160.580312][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.580328][ T7415] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 160.580338][ T7415] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 160.581238][ T7415] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 160.606373][ T7415] netdevsim netdevsim4 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 160.609204][ T7415] netdevsim netdevsim4 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 160.613270][ T7415] netdevsim netdevsim4 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 160.615537][ T7415] netdevsim netdevsim4 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 160.671567][ T660] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 160.673779][ T660] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 160.759038][ T10] usb 1-1: config 0 interface 0 altsetting 0 endpoint 0x81 has an invalid bInterval 0, changing to 7 [ 160.762057][ T10] usb 1-1: config 0 interface 0 altsetting 0 endpoint 0x81 has invalid wMaxPacketSize 0 [ 160.764633][ T10] usb 1-1: New USB device found, idVendor=1e7d, idProduct=319c, bcdDevice= 0.00 [ 160.769481][ T10] usb 1-1: New USB device strings: Mfr=0, Product=0, SerialNumber=0 [ 160.773998][ T43] netdevsim netdevsim1 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 160.779316][ T10] usb 1-1: config 0 descriptor?? 
[ 160.801918][ T229] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 160.804108][ T229] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 160.828311][ T6407] Bluetooth: hci5: command tx timeout [ 160.833187][ T7597] netdevsim netdevsim2 netdevsim0: renamed from eth0 [ 160.841921][ T7597] netdevsim netdevsim2 netdevsim1: renamed from eth1 [ 160.896777][ T6407] Bluetooth: hci6: command tx timeout [ 160.909154][ T43] netdevsim netdevsim1 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 160.918042][ T7597] netdevsim netdevsim2 netdevsim2: renamed from eth2 [ 160.921833][ T7597] netdevsim netdevsim2 netdevsim3: renamed from eth3 [ 161.003690][ T43] netdevsim netdevsim1 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 161.118636][ T43] netdevsim netdevsim1 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 161.168060][ T30] audit: type=1326 audit(161.140:3): auid=4294967295 uid=0 gid=0 ses=4294967295 subj=_ pid=7676 comm="syz.4.182" exe="/root/syz-executor" sig=31 arch=c00000b7 syscall=98 compat=0 ip=0xffff80745ee8 code=0x0 [ 161.178932][ T7597] 8021q: adding VLAN 0 to HW filter on device bond0 [ 161.189117][ T7597] 8021q: adding VLAN 0 to HW filter on device team0 [ 161.195914][ T364] bridge0: port 1(bridge_slave_0) entered blocking state [ 161.198028][ T364] bridge0: port 1(bridge_slave_0) entered forwarding state [ 161.213587][ T10] isku 0003:1E7D:319C.0002: item fetching failed at offset 0/7 [ 161.215962][ T10] isku 0003:1E7D:319C.0002: parse failed [ 161.223597][ T10] isku 0003:1E7D:319C.0002: probe with driver isku failed with error -22 [ 161.233499][ T364] bridge0: port 2(bridge_slave_1) entered blocking state [ 161.235479][ T364] bridge0: port 2(bridge_slave_1) entered forwarding state [ 161.264533][ T7680] loop3: detected capacity change from 0 to 8 [ 161.274167][ T7680] SQUASHFS error: zlib decompression failed, data probably corrupt [ 161.276558][ T7680] SQUASHFS error: Failed to read block 0x9b: -5 [ 161.284168][ T7680] SQUASHFS error: Unable to read metadata cache entry [99] [ 161.286181][ T7680] SQUASHFS error: Unable to read inode 0x127 [ 161.410583][ T43] bridge_slave_1: left allmulticast mode [ 161.412189][ T43] bridge_slave_1: left promiscuous mode [ 161.413780][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 161.435770][ T6385] usb 1-1: USB disconnect, device number 3 [ 161.441826][ T43] bridge_slave_0: left allmulticast mode [ 161.446043][ T43] bridge_slave_0: left promiscuous mode [ 161.453100][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 161.481638][ T43] bridge_slave_1: left allmulticast mode [ 161.483283][ T43] bridge_slave_1: left promiscuous mode [ 161.484849][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 161.491402][ T43] bridge_slave_0: left allmulticast mode [ 161.493088][ T43] bridge_slave_0: left promiscuous mode [ 161.494754][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 165.730559][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 165.769907][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 165.810627][ T43] bond0 (unregistering): Released all slaves [ 166.519507][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 166.559439][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 166.619688][ T43] bond0 (unregistering): Released all slaves [ 166.740806][ T7597] 8021q: adding VLAN 0 to HW 
filter on device batadv0 [ 166.886344][ T7733] veth1_to_bridge: entered promiscuous mode [ 166.924365][ T7734] netlink: 4 bytes leftover after parsing attributes in process `syz.0.193'. [ 166.928801][ T7734] bridge0: port 2(bridge_slave_1) entered disabled state [ 166.932608][ T7734] bridge0: port 2(bridge_slave_1) entered blocking state [ 166.934917][ T7734] bridge0: port 2(bridge_slave_1) entered forwarding state [ 167.013609][ T7742] loop0: detected capacity change from 0 to 512 [ 167.044638][ T7742] EXT4-fs (loop0): mounted filesystem 00000000-0000-0000-0000-000000000000 r/w without journal. Quota mode: writeback. [ 167.084541][ T7394] EXT4-fs (loop0): unmounting filesystem 00000000-0000-0000-0000-000000000000. [ 167.135319][ T7748] batman_adv: batadv0: adding TT local entry aa:aa:aa:aa:aa:2a to non-existent VLAN 1280 [ 167.161918][ T662] bridge0: port 2(bridge_slave_1) entered disabled state [ 167.180107][ T7740] ip6gretap0: entered promiscuous mode [ 167.182041][ T7740] macvtap1: entered allmulticast mode [ 167.183504][ T7740] ip6gretap0: entered allmulticast mode [ 167.218954][ T7738] loop3: detected capacity change from 0 to 32768 [ 167.226235][ T7738] BTRFS: device fsid c9fe44da-de57-406a-8241-57ec7d4412cf devid 1 transid 8 /dev/loop3 (7:3) scanned by syz.3.195 (7738) [ 167.243618][ T7750] binder: 7749:7750 tried to acquire reference to desc 0, got 1 instead [ 167.251243][ T7597] veth0_vlan: entered promiscuous mode [ 167.257049][ T7738] BTRFS info (device loop3): first mount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 167.259779][ T7738] BTRFS info (device loop3): using crc32c (crc32c-generic) checksum algorithm [ 167.262194][ T7738] BTRFS info (device loop3): using free-space-tree [ 167.300700][ T7765] binder_alloc: 7749: binder_alloc_buf size 8 failed, no address space [ 167.303616][ T7765] binder_alloc: allocated: 4096 (num: 1 largest: 4096), free: 0 (num: 0 largest: 0) [ 167.318653][ T7765] binder: cannot allocate buffer: no space left [ 167.318714][ T7765] binder: 7749:7765 transaction call to 7749:0 failed 34/29201/-28, size 0-0 line 3332 [ 167.347277][ T7738] BTRFS info (device loop3): checking UUID tree [ 167.460558][ T7303] BTRFS info (device loop3): last unmount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 167.483331][ T7597] veth1_vlan: entered promiscuous mode [ 167.605564][ T7599] netdevsim netdevsim1 netdevsim0: renamed from eth0 [ 167.726973][ T7599] netdevsim netdevsim1 netdevsim1: renamed from eth1 [ 167.732092][ T7599] netdevsim netdevsim1 netdevsim2: renamed from eth2 [ 167.745423][ T7599] netdevsim netdevsim1 netdevsim3: renamed from eth3 [ 167.755752][ T7597] veth0_macvtap: entered promiscuous mode [ 167.791180][ T7772] loop4: detected capacity change from 0 to 32768 [ 167.796523][ T7772] BTRFS: device fsid c9fe44da-de57-406a-8241-57ec7d4412cf devid 1 transid 8 /dev/loop4 (7:4) scanned by syz.4.201 (7772) [ 167.803108][ T7772] BTRFS info (device loop4): first mount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 167.806029][ T7772] BTRFS info (device loop4): using crc32c (crc32c-generic) checksum algorithm [ 167.814620][ T7772] BTRFS info (device loop4): using free-space-tree [ 167.826434][ T7597] veth1_macvtap: entered promiscuous mode [ 167.905197][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 167.915307][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 167.930642][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 167.939585][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 167.945564][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 167.952634][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 167.956086][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 167.964382][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 167.967877][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 167.972386][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 167.981857][ T7597] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 168.013916][ T7415] BTRFS info (device loop4): last unmount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 168.048640][ T27] binder: release 7749:7750 transaction 33 out, still active [ 168.050872][ T27] binder: undelivered TRANSACTION_COMPLETE [ 168.052401][ T27] binder: undelivered TRANSACTION_ERROR: 29201 [ 168.076977][ T6477] binder: send failed reply for transaction 33, target dead [ 168.258756][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 168.272016][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 168.274648][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 168.282627][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 168.285621][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 168.296975][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 168.299670][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 168.315501][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 168.322967][ T7597] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 168.325877][ T7597] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 168.337128][ T7597] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 168.361824][ T7807] 8021q: adding VLAN 0 to HW filter on device bond0 [ 168.373599][ T7807] team0: Port device bond0 added [ 168.440344][ T7812] team0: Port device team_slave_0 removed [ 168.442591][ T7812] A link change request failed with some changes committed already. Interface team_slave_0 may have been left with an inconsistent configuration, please check. 
[ 168.523861][ T7597] netdevsim netdevsim2 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 168.534522][ T7597] netdevsim netdevsim2 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 168.539606][ T7597] netdevsim netdevsim2 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 168.542271][ T7597] netdevsim netdevsim2 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 168.609254][ T43] hsr_slave_0: left promiscuous mode [ 168.657255][ T43] hsr_slave_1: left promiscuous mode [ 168.726997][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 168.729073][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 168.731761][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 168.733770][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 168.748246][ T43] hsr_slave_0: left promiscuous mode [ 168.787064][ T43] hsr_slave_1: left promiscuous mode [ 168.866906][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 168.868462][ T7846] UDC core: USB Raw Gadget: couldn't find an available UDC or it's busy [ 168.872492][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 168.877777][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 168.879927][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 168.907886][ T7846] misc raw-gadget: fail, usb_gadget_register_driver returned -16 [ 168.915533][ T43] veth1_macvtap: left promiscuous mode [ 168.918495][ T43] veth0_macvtap: left promiscuous mode [ 168.920082][ T43] veth1_vlan: left promiscuous mode [ 168.921705][ T43] veth0_vlan: left promiscuous mode [ 168.925295][ T43] veth1_macvtap: left promiscuous mode [ 168.926993][ T43] veth0_macvtap: left promiscuous mode [ 168.928745][ T43] veth1_vlan: left promiscuous mode [ 168.930347][ T43] veth0_vlan: left promiscuous mode [ 170.855139][ T43] team0 (unregistering): Port device team_slave_1 removed [ 171.030197][ T43] team0 (unregistering): Port device team_slave_0 removed [ 175.594162][ T43] team0 (unregistering): Port device team_slave_1 removed [ 175.800122][ T43] team0 (unregistering): Port device team_slave_0 removed [ 178.412472][ T7856] team_slave_0: entered promiscuous mode [ 178.414467][ T7856] team_slave_1: entered promiscuous mode [ 178.422256][ T7860] netlink: 'syz.4.212': attribute type 10 has an invalid length. 
[ 178.426732][ T7860] team_slave_0: left promiscuous mode [ 178.428455][ T7860] team_slave_1: left promiscuous mode [ 178.443037][ T7860] team_slave_0: entered promiscuous mode [ 178.444653][ T7860] team_slave_1: entered promiscuous mode [ 178.452375][ T7860] 8021q: adding VLAN 0 to HW filter on device team0 [ 178.456402][ T7860] bond0: (slave team0): Enslaving as an active interface with an up link [ 178.462319][ T7862] warning: `syz.3.213' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211 [ 178.517623][ T7859] team_slave_0: left promiscuous mode [ 178.519509][ T7859] team_slave_1: left promiscuous mode [ 178.593566][ T7599] 8021q: adding VLAN 0 to HW filter on device bond0 [ 178.642867][ T7599] 8021q: adding VLAN 0 to HW filter on device team0 [ 178.662486][ T662] bridge0: port 1(bridge_slave_0) entered blocking state [ 178.664470][ T662] bridge0: port 1(bridge_slave_0) entered forwarding state [ 178.688331][ T229] bridge0: port 2(bridge_slave_1) entered blocking state [ 178.690089][ T229] bridge0: port 2(bridge_slave_1) entered forwarding state [ 178.703786][ T364] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 178.706330][ T364] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 178.729847][ T7599] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 178.732613][ T7599] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 178.766757][ T660] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 178.782078][ T660] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 178.786281][ T6405] Bluetooth: hci1: unexpected cc 0x0c03 length: 249 > 1 [ 178.795464][ T6405] Bluetooth: hci1: unexpected cc 0x1003 length: 249 > 9 [ 178.798877][ T6405] Bluetooth: hci1: unexpected cc 0x1001 length: 249 > 9 [ 178.808807][ T6405] Bluetooth: hci1: unexpected cc 0x0c23 length: 249 > 4 [ 178.811449][ T6405] Bluetooth: hci1: unexpected cc 0x0c25 length: 249 > 3 [ 178.813575][ T6405] Bluetooth: hci1: unexpected cc 0x0c38 length: 249 > 2 [ 179.073337][ T7599] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 179.726896][ T7872] chnl_net:caif_netlink_parms(): no params data found [ 179.860167][ T7894] binder: 7893:7894 tried to acquire reference to desc 0, got 1 instead [ 179.866524][ T27] binder: release 7893:7894 transaction 39 out, still active [ 179.878843][ T27] binder: undelivered TRANSACTION_COMPLETE [ 179.893155][ T7599] veth0_vlan: entered promiscuous mode [ 179.909068][ T27] binder: send failed reply for transaction 39, target dead [ 179.995838][ T7599] veth1_vlan: entered promiscuous mode [ 180.037543][ T7872] bridge0: port 1(bridge_slave_0) entered blocking state [ 180.039516][ T7872] bridge0: port 1(bridge_slave_0) entered disabled state [ 180.047022][ T7872] bridge_slave_0: entered allmulticast mode [ 180.049192][ T7872] bridge_slave_0: entered promiscuous mode [ 180.055415][ T7872] bridge0: port 2(bridge_slave_1) entered blocking state [ 180.064922][ T7872] bridge0: port 2(bridge_slave_1) entered disabled state [ 180.074756][ T7872] bridge_slave_1: entered allmulticast mode [ 180.077287][ T7872] bridge_slave_1: entered promiscuous mode [ 180.108155][ T7599] veth0_macvtap: entered promiscuous mode [ 180.111827][ T7599] veth1_macvtap: entered promiscuous mode [ 180.157802][ T7872] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 180.162000][ T7872] bond0: (slave bond_slave_1): Enslaving as an active 
interface with an up link [ 180.181714][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 180.184537][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.200490][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 180.203330][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.205876][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 180.216993][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.219625][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 180.222362][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.226175][ T7599] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 180.228351][ T7908] loop2: detected capacity change from 0 to 1024 [ 180.253548][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 180.256344][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.276698][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 180.279538][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.282373][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 180.293586][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.296397][ T7599] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 180.297159][ T7908] EXT4-fs (loop2): mounted filesystem 00000000-0000-0000-0000-000000000000 r/w without journal. Quota mode: none. [ 180.316886][ T7599] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 180.331240][ T7599] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 180.362281][ T7906] netlink: 'syz.2.223': attribute type 4 has an invalid length. [ 180.414827][ T7872] team0: Port device team_slave_0 added [ 180.437381][ T7872] team0: Port device team_slave_1 added [ 180.440466][ T7599] netdevsim netdevsim1 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 180.442912][ T7599] netdevsim netdevsim1 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 180.445246][ T7599] netdevsim netdevsim1 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 180.455562][ T7597] EXT4-fs (loop2): unmounting filesystem 00000000-0000-0000-0000-000000000000. [ 180.495373][ T7599] netdevsim netdevsim1 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 180.595639][ T7907] loop4: detected capacity change from 0 to 32768 [ 180.636329][ T7872] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 180.638572][ T7872] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. 
[ 180.645344][ T7872] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 180.689793][ T7872] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 180.691622][ T7872] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 180.727071][ T7872] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 180.742471][ T7916] syzkaller0: entered promiscuous mode [ 180.743926][ T7916] syzkaller0: entered allmulticast mode [ 180.846453][ T7913] loop2: detected capacity change from 0 to 32768 [ 180.853831][ T7913] BTRFS: device fsid 395ef67a-297e-477c-816d-cd80a5b93e5d devid 1 transid 8 /dev/loop2 (7:2) scanned by syz.2.225 (7913) [ 180.871885][ T7913] BTRFS info (device loop2): first mount of filesystem 395ef67a-297e-477c-816d-cd80a5b93e5d [ 180.874688][ T7913] BTRFS info (device loop2): using sha256 (sha256-ce) checksum algorithm [ 180.890251][ T7913] BTRFS info (device loop2): using free-space-tree [ 180.896938][ T6407] Bluetooth: hci1: command tx timeout [ 181.046295][ T7597] BTRFS info (device loop2): last unmount of filesystem 395ef67a-297e-477c-816d-cd80a5b93e5d [ 182.976967][ T6407] Bluetooth: hci1: command tx timeout [ 185.066881][ T6407] Bluetooth: hci1: command tx timeout [ 187.136830][ T6407] Bluetooth: hci1: command tx timeout [ 187.378219][ T2335] ieee802154 phy0 wpan0: encryption failed: -22 [ 187.380177][ T2335] ieee802154 phy1 wpan1: encryption failed: -22 [ 191.609072][ T6405] Bluetooth: hci0: unexpected cc 0x0c03 length: 249 > 1 [ 191.613559][ T6405] Bluetooth: hci0: unexpected cc 0x1003 length: 249 > 9 [ 191.621377][ T6411] Bluetooth: hci0: unexpected cc 0x1001 length: 249 > 9 [ 191.624638][ T53] Bluetooth: hci3: unexpected cc 0x0c03 length: 249 > 1 [ 191.649501][ T6401] Bluetooth: hci3: unexpected cc 0x1003 length: 249 > 9 [ 191.650828][ T6411] Bluetooth: hci0: unexpected cc 0x0c23 length: 249 > 4 [ 191.653006][ T6401] Bluetooth: hci7: unexpected cc 0x0c03 length: 249 > 1 [ 191.653718][ T6411] Bluetooth: hci3: unexpected cc 0x1001 length: 249 > 9 [ 191.661413][ T6401] Bluetooth: hci7: unexpected cc 0x1003 length: 249 > 9 [ 191.663555][ T6411] Bluetooth: hci3: unexpected cc 0x0c23 length: 249 > 4 [ 191.664379][ T6401] Bluetooth: hci0: unexpected cc 0x0c25 length: 249 > 3 [ 191.668116][ T5963] Bluetooth: hci3: unexpected cc 0x0c25 length: 249 > 3 [ 191.668955][ T6401] Bluetooth: hci7: unexpected cc 0x1001 length: 249 > 9 [ 191.670203][ T5963] Bluetooth: hci0: unexpected cc 0x0c38 length: 249 > 2 [ 191.672240][ T6401] Bluetooth: hci3: unexpected cc 0x0c38 length: 249 > 2 [ 191.676524][ T6401] Bluetooth: hci7: unexpected cc 0x0c23 length: 249 > 4 [ 191.680923][ T6407] Bluetooth: hci7: unexpected cc 0x0c25 length: 249 > 3 [ 191.685239][ T6407] Bluetooth: hci7: unexpected cc 0x0c38 length: 249 > 2 [ 192.977797][ T662] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 192.980016][ T662] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 193.017322][ C0] Illegal XDP return value 16128 on prog (id 12) dev bond_slave_0, expect packet loss! [ 193.081201][ T7872] hsr_slave_0: entered promiscuous mode [ 193.117189][ T7872] hsr_slave_1: entered promiscuous mode [ 193.157111][ T7872] debugfs: Directory 'hsr0' with parent 'hsr' already present! 
[ 193.159168][ T7872] Cannot create hsr debugfs directory [ 193.510177][ T43] netdevsim netdevsim0 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 193.524217][ T660] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 193.526440][ T660] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 193.696971][ T6407] Bluetooth: hci3: command tx timeout [ 193.696996][ T6401] Bluetooth: hci0: command tx timeout [ 193.778756][ T6401] Bluetooth: hci7: command tx timeout [ 193.779853][ T43] netdevsim netdevsim0 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 193.889505][ T43] netdevsim netdevsim0 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 194.075534][ T43] netdevsim netdevsim0 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 194.671569][ T7944] chnl_net:caif_netlink_parms(): no params data found [ 194.681107][ T7945] chnl_net:caif_netlink_parms(): no params data found [ 194.774252][ T7944] bridge0: port 1(bridge_slave_0) entered blocking state [ 194.776393][ T7944] bridge0: port 1(bridge_slave_0) entered disabled state [ 194.782154][ T7944] bridge_slave_0: entered allmulticast mode [ 194.786602][ T7944] bridge_slave_0: entered promiscuous mode [ 194.795920][ T7944] bridge0: port 2(bridge_slave_1) entered blocking state [ 194.802302][ T7944] bridge0: port 2(bridge_slave_1) entered disabled state [ 194.804763][ T7944] bridge_slave_1: entered allmulticast mode [ 194.807579][ T7944] bridge_slave_1: entered promiscuous mode [ 194.858007][ T7945] bridge0: port 1(bridge_slave_0) entered blocking state [ 194.862390][ T7945] bridge0: port 1(bridge_slave_0) entered disabled state [ 194.864536][ T7945] bridge_slave_0: entered allmulticast mode [ 194.871081][ T7945] bridge_slave_0: entered promiscuous mode [ 194.879525][ T7949] chnl_net:caif_netlink_parms(): no params data found [ 194.902800][ T7945] bridge0: port 2(bridge_slave_1) entered blocking state [ 194.904877][ T7945] bridge0: port 2(bridge_slave_1) entered disabled state [ 194.907313][ T7945] bridge_slave_1: entered allmulticast mode [ 194.909501][ T7945] bridge_slave_1: entered promiscuous mode [ 194.933266][ T7944] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 194.953803][ T7945] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 194.958126][ T7944] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 194.965880][ T7945] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 195.015239][ T7949] bridge0: port 1(bridge_slave_0) entered blocking state [ 195.017548][ T7949] bridge0: port 1(bridge_slave_0) entered disabled state [ 195.019581][ T7949] bridge_slave_0: entered allmulticast mode [ 195.021816][ T7949] bridge_slave_0: entered promiscuous mode [ 195.029863][ T7944] team0: Port device team_slave_0 added [ 195.043729][ T7949] bridge0: port 2(bridge_slave_1) entered blocking state [ 195.045786][ T7949] bridge0: port 2(bridge_slave_1) entered disabled state [ 195.052532][ T7949] bridge_slave_1: entered allmulticast mode [ 195.054769][ T7949] bridge_slave_1: entered promiscuous mode [ 195.060469][ T7944] team0: Port device team_slave_1 added [ 195.090701][ T7945] team0: Port device team_slave_0 added [ 195.110496][ T43] bridge_slave_1: left allmulticast mode [ 195.112265][ T43] bridge_slave_1: left promiscuous mode [ 195.114504][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 195.118149][ T43] 
bridge_slave_0: left allmulticast mode [ 195.119626][ T43] bridge_slave_0: left promiscuous mode [ 195.121295][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 195.776880][ T6401] Bluetooth: hci3: command tx timeout [ 195.778381][ T6401] Bluetooth: hci0: command tx timeout [ 195.866809][ T6407] Bluetooth: hci7: command tx timeout [ 196.822738][ T43] team0: Port device bond0 removed [ 196.826525][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 196.880107][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 196.919951][ T43] bond0 (unregistering): Released all slaves [ 196.954442][ T7945] team0: Port device team_slave_1 added [ 196.960090][ T7949] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 196.963027][ T7944] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 196.964936][ T7944] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 196.985179][ T7944] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 196.989730][ T7944] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 196.991479][ T7944] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 196.998992][ T7944] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 197.067587][ T7949] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 197.233104][ T7945] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 197.235073][ T7945] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 197.244648][ T7945] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 197.310947][ T7945] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 197.312923][ T7945] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 197.346387][ T7945] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 197.364589][ T7949] team0: Port device team_slave_0 added [ 197.368108][ T7949] team0: Port device team_slave_1 added [ 197.418745][ T7944] hsr_slave_0: entered promiscuous mode [ 197.457295][ T7944] hsr_slave_1: entered promiscuous mode [ 197.499589][ T7944] debugfs: Directory 'hsr0' with parent 'hsr' already present! 
[ 197.501785][ T7944] Cannot create hsr debugfs directory [ 197.537518][ T7989] loop1: detected capacity change from 0 to 131072 [ 197.543043][ T7989] F2FS-fs (loop1): QUOTA feature is enabled, so ignore qf_name [ 197.549620][ T7989] F2FS-fs (loop1): invalid crc value [ 197.558854][ T7989] F2FS-fs (loop1): Disable nat_bits due to incorrect cp_ver (15359802341028777995, 275811881701387) [ 197.583269][ T7989] F2FS-fs (loop1): Mounted with checkpoint version = 753bd00b [ 197.825568][ T7945] hsr_slave_0: entered promiscuous mode [ 197.856868][ T6407] Bluetooth: hci0: command tx timeout [ 197.858468][ T6407] Bluetooth: hci3: command tx timeout [ 197.867066][ T7945] hsr_slave_1: entered promiscuous mode [ 197.906969][ T7945] debugfs: Directory 'hsr0' with parent 'hsr' already present! [ 197.909139][ T7945] Cannot create hsr debugfs directory [ 197.917189][ T7949] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 197.919076][ T7949] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 197.926173][ T7949] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 197.938415][ T7949] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 197.940303][ T7949] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 197.955043][ T7949] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 197.966916][ T6401] Bluetooth: hci7: command tx timeout [ 198.090093][ T43] hsr_slave_0: left promiscuous mode [ 198.126880][ T43] hsr_slave_1: left promiscuous mode [ 198.227114][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 198.229855][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 198.233253][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 198.235322][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 198.261754][ T43] veth1_macvtap: left promiscuous mode [ 198.264258][ T43] veth0_macvtap: left promiscuous mode [ 198.266240][ T43] veth1_vlan: left promiscuous mode [ 198.268104][ T43] veth0_vlan: left promiscuous mode [ 199.936886][ T6401] Bluetooth: hci3: command tx timeout [ 199.938533][ T6401] Bluetooth: hci0: command tx timeout [ 200.016810][ T6407] Bluetooth: hci7: command tx timeout [ 200.301091][ T43] team0 (unregistering): Port device team_slave_1 removed [ 202.958714][ T7949] hsr_slave_0: entered promiscuous mode [ 202.997241][ T7949] hsr_slave_1: entered promiscuous mode [ 203.036861][ T7949] debugfs: Directory 'hsr0' with parent 'hsr' already present! 
[ 203.038873][ T7949] Cannot create hsr debugfs directory [ 203.863842][ T7944] netdevsim netdevsim3 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 203.875071][ T7872] netdevsim netdevsim0 netdevsim0: renamed from eth0 [ 203.878911][ T7872] netdevsim netdevsim0 netdevsim1: renamed from eth1 [ 203.962338][ T7872] netdevsim netdevsim0 netdevsim2: renamed from eth2 [ 203.973878][ T7872] netdevsim netdevsim0 netdevsim3: renamed from eth3 [ 204.121830][ T7944] netdevsim netdevsim3 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 204.236916][ T8024] netlink: 4 bytes leftover after parsing attributes in process `syz.1.239'. [ 204.360160][ T7944] netdevsim netdevsim3 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 204.452334][ T7944] netdevsim netdevsim3 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 204.720739][ T7872] 8021q: adding VLAN 0 to HW filter on device bond0 [ 205.070737][ T7872] 8021q: adding VLAN 0 to HW filter on device team0 [ 205.726093][ T303] bridge0: port 1(bridge_slave_0) entered blocking state [ 205.728179][ T303] bridge0: port 1(bridge_slave_0) entered forwarding state [ 205.735360][ T303] bridge0: port 2(bridge_slave_1) entered blocking state [ 205.737355][ T303] bridge0: port 2(bridge_slave_1) entered forwarding state [ 205.776502][ T7872] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 205.820189][ T7872] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 205.929901][ T43] netdevsim netdevsim2 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.093107][ T43] netdevsim netdevsim2 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.119333][ T7944] netdevsim netdevsim3 netdevsim0: renamed from eth0 [ 206.126542][ T8036] loop1: detected capacity change from 0 to 40427 [ 206.128578][ T7944] netdevsim netdevsim3 netdevsim1: renamed from eth1 [ 206.131496][ T8036] F2FS-fs (loop1): Invalid log_blocksize (268), supports only 12 [ 206.133684][ T8036] F2FS-fs (loop1): Can't find valid F2FS filesystem in 1th superblock [ 206.139908][ T8036] F2FS-fs (loop1): Found nat_bits in checkpoint [ 206.151237][ T7944] netdevsim netdevsim3 netdevsim2: renamed from eth2 [ 206.160728][ T8036] F2FS-fs (loop1): Try to recover 1th superblock, ret: 0 [ 206.162888][ T8036] F2FS-fs (loop1): Mounted with checkpoint version = 48b305e5 [ 206.214609][ T8036] syz.1.241: attempt to access beyond end of device [ 206.214609][ T8036] loop1: rw=2049, sector=45096, nr_sectors = 8 limit=40427 [ 206.241223][ T43] netdevsim netdevsim2 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.271041][ T7944] netdevsim netdevsim3 netdevsim3: renamed from eth3 [ 206.293130][ T7872] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 206.325497][ T8036] syz.1.241: attempt to access beyond end of device [ 206.325497][ T8036] loop1: rw=2049, sector=40960, nr_sectors = 8 limit=40427 [ 206.343348][ T8036] F2FS-fs (loop1): Stopped filesystem due to reason: 3 [ 206.369065][ T43] netdevsim netdevsim2 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.449428][ T7872] veth0_vlan: entered promiscuous mode [ 206.463915][ T7872] veth1_vlan: entered promiscuous mode [ 206.494875][ T7944] 8021q: adding VLAN 0 to HW filter on device bond0 [ 206.524447][ T7872] veth0_macvtap: entered promiscuous mode [ 206.555373][ T7944] 8021q: adding VLAN 0 to HW 
filter on device team0 [ 206.615509][ T7872] veth1_macvtap: entered promiscuous mode [ 206.619685][ T303] bridge0: port 1(bridge_slave_0) entered blocking state [ 206.621743][ T303] bridge0: port 1(bridge_slave_0) entered forwarding state [ 206.624645][ T303] bridge0: port 2(bridge_slave_1) entered blocking state [ 206.626628][ T303] bridge0: port 2(bridge_slave_1) entered forwarding state [ 206.690463][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 206.693476][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.696247][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 206.708068][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.710853][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 206.713697][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.716336][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 206.719804][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.723614][ T7872] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 206.732242][ T7944] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 206.735130][ T7944] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 206.830393][ T43] netdevsim netdevsim4 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.892598][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 206.895860][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.902183][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 206.905245][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.908955][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 206.912362][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 206.915271][ T7872] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 206.918417][ T7872] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 206.923489][ T7872] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 206.970547][ T43] netdevsim netdevsim4 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 206.987681][ T7872] netdevsim netdevsim0 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 206.995145][ T7872] netdevsim netdevsim0 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 207.004337][ T7872] netdevsim netdevsim0 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 207.008435][ T7872] netdevsim netdevsim0 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 207.132174][ T43] netdevsim netdevsim4 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 207.185776][ T7944] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 207.258582][ T43] netdevsim netdevsim4 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 207.313196][ T323] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 207.315450][ T323] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 207.352279][ T7944] veth0_vlan: entered promiscuous mode [ 207.380571][ T7944] veth1_vlan: entered promiscuous mode [ 207.387939][ T11] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 207.390371][ T11] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 207.427224][ T8094] loop1: detected capacity change from 0 to 32768 [ 207.439029][ T8094] BTRFS: device fsid 5e4b7888-5e56-43f0-8345-635ad0fd87c6 devid 1 transid 8 /dev/loop1 (7:1) scanned by syz.1.244 (8094) [ 207.445630][ T8094] BTRFS info (device loop1): first mount of filesystem 5e4b7888-5e56-43f0-8345-635ad0fd87c6 [ 207.462561][ T7944] veth0_macvtap: entered promiscuous mode [ 207.466311][ T7944] veth1_macvtap: entered promiscuous mode [ 207.469392][ T8094] BTRFS info (device loop1): using blake2b (blake2b-256-generic) checksum algorithm [ 207.477794][ T8094] BTRFS info (device loop1): using free-space-tree [ 207.506232][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 207.510951][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 207.527733][ T8106] netlink: 'syz.0.214': attribute type 1 has an invalid length. [ 207.527832][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 207.536347][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 207.539969][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 207.542935][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 207.545561][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 207.552848][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 207.557047][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 207.559844][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 207.581189][ T7944] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 207.584354][ T43] bridge_slave_1: left allmulticast mode [ 207.585957][ T43] bridge_slave_1: left promiscuous mode [ 207.592501][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 207.603305][ T43] bridge_slave_0: left allmulticast mode [ 207.604866][ T43] bridge_slave_0: left promiscuous mode [ 207.606572][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 207.627592][ T43] bridge_slave_1: left allmulticast mode [ 207.631552][ T43] bridge_slave_1: left promiscuous mode [ 207.639368][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 207.662566][ T43] bridge_slave_0: left allmulticast mode [ 207.664164][ T43] bridge_slave_0: left promiscuous mode [ 207.665812][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 207.709883][ T43] bridge_slave_1: left allmulticast mode [ 207.711386][ T43] bridge_slave_1: left promiscuous mode [ 207.713044][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 207.731090][ T43] bridge_slave_0: left allmulticast mode [ 207.732751][ T43] bridge_slave_0: left promiscuous mode [ 207.734385][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 207.829314][ T229] BTRFS info (device loop1): qgroup scan completed (inconsistency flag cleared) [ 207.855736][ T8094] BTRFS info (device loop1): balance: start -d -m [ 207.910421][ T8094] BTRFS info (device loop1): relocating block group 6881280 flags data|metadata [ 207.971736][ T8094] BTRFS info (device loop1): relocating block group 5242880 flags data|metadata [ 208.060099][ T8094] BTRFS info (device loop1): found 10 extents, stage: move data extents [ 208.131961][ T8094] BTRFS info (device loop1): found 1 extents, stage: update data pointers [ 208.191112][ T8094] BTRFS info (device loop1): balance: ended with status: 0 [ 208.254023][ T30] audit: type=1326 audit(208.230:4): auid=4294967295 uid=0 gid=0 ses=4294967295 subj=_ pid=8093 comm="syz.1.244" exe="/root/syz-executor" sig=9 arch=c00000b7 syscall=98 compat=0 ip=0xffffb1b45ee8 code=0x0 [ 208.487887][ T7599] BTRFS info (device loop1): last unmount of filesystem 5e4b7888-5e56-43f0-8345-635ad0fd87c6 [ 208.529033][ T43] ip6gretap0 (unregistering): left allmulticast mode [ 209.561556][ T8145] loop1: detected capacity change from 0 to 128 [ 209.563918][ T8145] EXT4-fs: Ignoring removed orlov option [ 209.583659][ T8145] EXT4-fs (loop1): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 209.595036][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.604148][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.614101][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.619136][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.626471][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. 
[ 209.632073][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.642035][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.646803][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.651341][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.655609][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.660812][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.664887][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.669890][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.674157][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.683983][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.688551][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.701319][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.705512][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.710858][ T8145] EXT4-fs warning (device loop1): ext4_dirblock_csum_verify:406: inode #2: comm syz.1.248: No space for directory leaf checksum. Please run e2fsck -D. [ 209.715028][ T8145] EXT4-fs error (device loop1): __ext4_find_entry:1652: inode #2: comm syz.1.248: checksumming directory block 0 [ 209.742448][ T7599] EXT4-fs (loop1): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 212.343730][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 212.380169][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 212.429271][ T43] bond0 (unregistering): Released all slaves [ 213.160226][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 213.200087][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 213.254535][ T43] bond0 (unregistering): Released all slaves [ 213.979890][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 214.019273][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 214.059427][ T43] bond0 (unregistering): (slave team0): Releasing backup interface [ 214.100425][ T43] bond0 (unregistering): Released all slaves [ 214.114397][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 214.117538][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 214.120114][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 214.122949][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 214.125672][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 214.128890][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 214.131619][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 214.134454][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 214.137595][ T7944] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 214.140282][ T7944] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 214.144104][ T7944] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 214.154747][ T7944] netdevsim netdevsim3 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 214.157818][ T7944] netdevsim netdevsim3 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 214.160140][ T7944] netdevsim netdevsim3 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 214.162483][ T7944] netdevsim netdevsim3 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 214.507028][ T8161] Trying to write to read-only block-device nullb0 [ 215.083694][ T8182] loop1: detected capacity change from 0 to 128 [ 215.152748][ T8182] EXT4-fs (loop1): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 215.425260][ T8] usb 1-1: new high-speed USB device number 4 using dummy_hcd [ 215.520742][ T7945] netdevsim netdevsim2 netdevsim0: renamed from eth0 [ 215.545527][ T7945] netdevsim netdevsim2 netdevsim1: renamed from eth1 [ 215.555730][ T7945] netdevsim netdevsim2 netdevsim2: renamed from eth2 [ 215.555986][ T323] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 215.578711][ T323] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 215.580962][ T7599] EXT4-fs (loop1): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. [ 215.609740][ T8] usb 1-1: config 0 interface 0 altsetting 0 endpoint 0x81 has an invalid bInterval 0, changing to 7 [ 215.612992][ T8] usb 1-1: config 0 interface 0 altsetting 0 has 1 endpoint descriptor, different from the interface descriptor's value: 21 [ 215.616414][ T8] usb 1-1: New USB device found, idVendor=047f, idProduct=ffff, bcdDevice= 0.00 [ 215.635574][ T8] usb 1-1: New USB device strings: Mfr=0, Product=0, SerialNumber=0 [ 215.638481][ T7945] netdevsim netdevsim2 netdevsim3: renamed from eth3 [ 215.658024][ T8] usb 1-1: config 0 descriptor?? 
[ 215.765738][ T303] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 215.779283][ T303] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 216.825796][ T8] plantronics 0003:047F:FFFF.0003: unknown main item tag 0x0 [ 216.837078][ T8] plantronics 0003:047F:FFFF.0003: unknown main item tag 0x0 [ 216.839298][ T8] plantronics 0003:047F:FFFF.0003: No inputs registered, leaving [ 216.862699][ T8] plantronics 0003:047F:FFFF.0003: hiddev0,hidraw0: USB HID v0.40 Device [HID 047f:ffff] on usb-dummy_hcd.0-1/input0 [ 216.929865][ T8205] loop1: detected capacity change from 0 to 32768 [ 216.937759][ T8205] BTRFS: device fsid c9fe44da-de57-406a-8241-57ec7d4412cf devid 1 transid 8 /dev/loop1 (7:1) scanned by syz.1.256 (8205) [ 216.945728][ T8205] BTRFS info (device loop1): first mount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 216.950758][ T8205] BTRFS info (device loop1): using crc32c (crc32c-generic) checksum algorithm [ 216.953762][ T8205] BTRFS info (device loop1): using free-space-tree [ 217.111119][ T7599] BTRFS info (device loop1): last unmount of filesystem c9fe44da-de57-406a-8241-57ec7d4412cf [ 217.149918][ T8] usb 1-1: USB disconnect, device number 4 [ 217.414919][ T8245] UDC core: USB Raw Gadget: couldn't find an available UDC or it's busy [ 217.447428][ T8245] misc raw-gadget: fail, usb_gadget_register_driver returned -16 [ 217.462406][ T7945] 8021q: adding VLAN 0 to HW filter on device bond0 [ 217.695474][ T7945] 8021q: adding VLAN 0 to HW filter on device team0 [ 217.770592][ T7949] netdevsim netdevsim4 netdevsim0: renamed from eth0 [ 217.784027][ T8252] loop0: detected capacity change from 0 to 1024 [ 217.788301][ T8252] EXT4-fs (loop0): stripe (65535) is not aligned with cluster size (16), stripe is disabled [ 217.791610][ T8252] JBD2: no valid journal superblock found [ 217.793171][ T8252] EXT4-fs (loop0): Could not load journal inode [ 217.810931][ T8143] bridge0: port 1(bridge_slave_0) entered blocking state [ 217.812960][ T8143] bridge0: port 1(bridge_slave_0) entered forwarding state [ 217.815852][ T8143] bridge0: port 2(bridge_slave_1) entered blocking state [ 217.817794][ T8143] bridge0: port 2(bridge_slave_1) entered forwarding state [ 217.841036][ T7949] netdevsim netdevsim4 netdevsim1: renamed from eth1 [ 218.132268][ T7949] netdevsim netdevsim4 netdevsim2: renamed from eth2 [ 218.138407][ T7949] netdevsim netdevsim4 netdevsim3: renamed from eth3 [ 218.737987][ T43] hsr_slave_0: left promiscuous mode [ 218.796866][ T43] hsr_slave_1: left promiscuous mode [ 218.917271][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 218.922844][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 218.935515][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 218.942368][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 218.958226][ T43] hsr_slave_0: left promiscuous mode [ 219.001620][ T8258] loop1: detected capacity change from 0 to 32768 [ 219.003899][ T8258] jfs: Unrecognized mount option "000000000000000000000000x0000000000000000" or missing value [ 219.007572][ T43] hsr_slave_1: left promiscuous mode [ 219.078060][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 219.084308][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 219.087620][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 219.089779][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 219.105387][ T43] hsr_slave_0: left promiscuous mode [ 219.129830][ T43] hsr_slave_1: left 
promiscuous mode [ 219.136302][ T8260] loop3: detected capacity change from 0 to 32768 [ 219.171867][ T8260] XFS (loop3): Mounting V5 Filesystem bfdc47fc-10d8-4eed-a562-11a831b3f791 [ 219.197126][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 219.199269][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 219.203385][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 219.206046][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 219.225524][ T8260] XFS (loop3): Ending clean mount [ 219.235397][ T8260] XFS (loop3): Quotacheck needed: Please wait. [ 219.304700][ T43] veth1_macvtap: left promiscuous mode [ 219.306527][ T43] veth0_macvtap: left promiscuous mode [ 219.309584][ T8260] XFS (loop3): Quotacheck: Done. [ 219.311686][ T43] veth1_vlan: left promiscuous mode [ 219.313178][ T43] veth0_vlan: left promiscuous mode [ 219.315399][ T43] veth1_macvtap: left promiscuous mode [ 219.323893][ T43] veth0_macvtap: left promiscuous mode [ 219.325471][ T43] veth1_vlan: left promiscuous mode [ 219.328423][ T43] veth0_vlan: left promiscuous mode [ 219.330680][ T43] veth1_macvtap: left promiscuous mode [ 219.332178][ T43] veth0_macvtap: left promiscuous mode [ 219.333765][ T43] veth1_vlan: left promiscuous mode [ 219.335258][ T43] veth0_vlan: left promiscuous mode [ 219.936983][ T6401] Bluetooth: hci2: command 0x1003 tx timeout [ 219.947145][ T6407] Bluetooth: hci2: Opcode 0x1003 failed: -110 [ 220.056990][ T7944] XFS (loop3): Unmounting Filesystem bfdc47fc-10d8-4eed-a562-11a831b3f791 [ 220.185271][ T8280] loop0: detected capacity change from 0 to 1024 [ 220.891960][ T323] hfsplus: b-tree write err: -5, ino 4 [ 223.477916][ T8306] netlink: 12 bytes leftover after parsing attributes in process `syz.3.273'. [ 223.995850][ T43] team0 (unregistering): Port device team_slave_1 removed [ 224.116610][ T8308] loop3: detected capacity change from 0 to 128 [ 224.147757][ T8308] EXT4-fs (loop3): mounted filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09 r/w without journal. Quota mode: none. [ 224.219570][ T43] team0 (unregistering): Port device team_slave_0 removed [ 224.243525][ T7944] EXT4-fs (loop3): unmounting filesystem 76b65be2-f6da-4727-8c75-0525a5b65a09. 
[ 228.773737][ T43] team0 (unregistering): Port device team_slave_1 removed [ 229.001132][ T43] team0 (unregistering): Port device team_slave_0 removed [ 231.092871][ T6401] Bluetooth: hci2: unexpected cc 0x0c03 length: 249 > 1 [ 231.107110][ T6401] Bluetooth: hci2: unexpected cc 0x1003 length: 249 > 9 [ 231.110076][ T6401] Bluetooth: hci2: unexpected cc 0x1001 length: 249 > 9 [ 231.112819][ T6401] Bluetooth: hci2: unexpected cc 0x0c23 length: 249 > 4 [ 231.115783][ T6401] Bluetooth: hci2: unexpected cc 0x0c25 length: 249 > 3 [ 231.117932][ T6401] Bluetooth: hci2: unexpected cc 0x0c38 length: 249 > 2 [ 231.602139][ T6401] Bluetooth: hci4: unexpected cc 0x0c03 length: 249 > 1 [ 231.607130][ T6401] Bluetooth: hci4: unexpected cc 0x1003 length: 249 > 9 [ 231.610067][ T6401] Bluetooth: hci4: unexpected cc 0x1001 length: 249 > 9 [ 231.612780][ T6401] Bluetooth: hci4: unexpected cc 0x0c23 length: 249 > 4 [ 231.615183][ T6401] Bluetooth: hci4: unexpected cc 0x0c25 length: 249 > 3 [ 231.623476][ T6401] Bluetooth: hci4: unexpected cc 0x0c38 length: 249 > 2 [ 233.136921][ T6401] Bluetooth: hci2: command tx timeout [ 233.696851][ T6401] Bluetooth: hci4: command tx timeout [ 233.882723][ T43] team0 (unregistering): Port device team_slave_1 removed [ 234.068684][ T43] team0 (unregistering): Port device team_slave_0 removed [ 234.595469][ T6407] Bluetooth: hci5: unexpected cc 0x0c03 length: 249 > 1 [ 234.601592][ T6407] Bluetooth: hci5: unexpected cc 0x1003 length: 249 > 9 [ 234.604493][ T6407] Bluetooth: hci5: unexpected cc 0x1001 length: 249 > 9 [ 234.612414][ T6407] Bluetooth: hci5: unexpected cc 0x0c23 length: 249 > 4 [ 234.614834][ T6407] Bluetooth: hci5: unexpected cc 0x0c25 length: 249 > 3 [ 234.621733][ T6407] Bluetooth: hci5: unexpected cc 0x0c38 length: 249 > 2 [ 235.216871][ T6407] Bluetooth: hci2: command tx timeout [ 235.786865][ T6407] Bluetooth: hci4: command tx timeout [ 236.668736][ T6401] Bluetooth: hci5: command tx timeout [ 236.919791][ T8312] netlink: 'syz.3.275': attribute type 1 has an invalid length. 
[ 237.236954][ T8314] bond0: (slave bond_slave_1): Releasing backup interface [ 237.296892][ T6401] Bluetooth: hci2: command tx timeout [ 237.484526][ T7945] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 237.528095][ T7949] 8021q: adding VLAN 0 to HW filter on device bond0 [ 237.746157][ T7949] 8021q: adding VLAN 0 to HW filter on device team0 [ 237.773075][ T303] bridge0: port 1(bridge_slave_0) entered blocking state [ 237.774973][ T303] bridge0: port 1(bridge_slave_0) entered forwarding state [ 237.778703][ T303] bridge0: port 2(bridge_slave_1) entered blocking state [ 237.780674][ T303] bridge0: port 2(bridge_slave_1) entered forwarding state [ 237.806537][ T8320] chnl_net:caif_netlink_parms(): no params data found [ 237.826309][ T7945] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 237.856893][ T6407] Bluetooth: hci4: command tx timeout [ 237.860147][ T7949] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 237.862820][ T7949] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 237.885616][ T8316] chnl_net:caif_netlink_parms(): no params data found [ 237.904932][ T8323] chnl_net:caif_netlink_parms(): no params data found [ 237.954947][ T7945] veth0_vlan: entered promiscuous mode [ 238.005679][ T7945] veth1_vlan: entered promiscuous mode [ 238.045333][ T8316] bridge0: port 1(bridge_slave_0) entered blocking state [ 238.048155][ T8316] bridge0: port 1(bridge_slave_0) entered disabled state [ 238.050366][ T8316] bridge_slave_0: entered allmulticast mode [ 238.052512][ T8316] bridge_slave_0: entered promiscuous mode [ 238.069013][ T8320] bridge0: port 1(bridge_slave_0) entered blocking state [ 238.071014][ T8320] bridge0: port 1(bridge_slave_0) entered disabled state [ 238.073071][ T8320] bridge_slave_0: entered allmulticast mode [ 238.075345][ T8320] bridge_slave_0: entered promiscuous mode [ 238.107590][ T8316] bridge0: port 2(bridge_slave_1) entered blocking state [ 238.109640][ T8316] bridge0: port 2(bridge_slave_1) entered disabled state [ 238.111718][ T8316] bridge_slave_1: entered allmulticast mode [ 238.114302][ T8316] bridge_slave_1: entered promiscuous mode [ 238.116396][ T8320] bridge0: port 2(bridge_slave_1) entered blocking state [ 238.118768][ T8320] bridge0: port 2(bridge_slave_1) entered disabled state [ 238.120714][ T8320] bridge_slave_1: entered allmulticast mode [ 238.122812][ T8320] bridge_slave_1: entered promiscuous mode [ 238.155261][ T8323] bridge0: port 1(bridge_slave_0) entered blocking state [ 238.157773][ T8323] bridge0: port 1(bridge_slave_0) entered disabled state [ 238.159874][ T8323] bridge_slave_0: entered allmulticast mode [ 238.162026][ T8323] bridge_slave_0: entered promiscuous mode [ 238.172680][ T7949] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 238.184574][ T8320] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 238.200102][ T8320] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 238.214926][ T8323] bridge0: port 2(bridge_slave_1) entered blocking state [ 238.217257][ T8323] bridge0: port 2(bridge_slave_1) entered disabled state [ 238.219281][ T8323] bridge_slave_1: entered allmulticast mode [ 238.221477][ T8323] bridge_slave_1: entered promiscuous mode [ 238.232661][ T8316] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 238.249449][ T7945] veth0_macvtap: entered promiscuous mode [ 238.261848][ T8320] 
team0: Port device team_slave_0 added [ 238.298391][ T8316] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 238.319169][ T8320] team0: Port device team_slave_1 added [ 238.334786][ T8323] bond0: (slave bond_slave_0): Enslaving as an active interface with an up link [ 238.339724][ T7945] veth1_macvtap: entered promiscuous mode [ 238.346422][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 238.350709][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.353321][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 238.356533][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.372342][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 238.375138][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.379358][ T7945] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 238.390309][ T8316] team0: Port device team_slave_0 added [ 238.395818][ T8316] team0: Port device team_slave_1 added [ 238.401848][ T8323] bond0: (slave bond_slave_1): Enslaving as an active interface with an up link [ 238.444255][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 238.447446][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.450148][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 238.452905][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.455407][ T7945] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 238.459250][ T7945] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 238.463027][ T7945] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 238.483574][ T8316] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 238.485538][ T8316] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 238.495627][ T8316] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 238.506890][ T8320] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 238.508872][ T8320] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 238.516387][ T8320] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 238.544948][ T8323] team0: Port device team_slave_0 added [ 238.547337][ T8316] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 238.549657][ T8316] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. 
[ 238.556963][ T8316] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 238.578607][ T7945] netdevsim netdevsim2 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 238.581099][ T7945] netdevsim netdevsim2 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 238.583456][ T7945] netdevsim netdevsim2 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 238.585757][ T7945] netdevsim netdevsim2 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 238.595896][ T8320] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 238.598358][ T8320] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 238.605377][ T8320] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 238.621196][ T8323] team0: Port device team_slave_1 added [ 238.688567][ T8316] hsr_slave_0: entered promiscuous mode [ 238.717158][ T8316] hsr_slave_1: entered promiscuous mode [ 238.736952][ T6407] Bluetooth: hci5: command tx timeout [ 238.774948][ T8323] batman_adv: batadv0: Adding interface: batadv_slave_0 [ 238.776948][ T8323] batman_adv: batadv0: The MTU of interface batadv_slave_0 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 238.783829][ T8323] batman_adv: batadv0: Not using interface batadv_slave_0 (retrying later): interface not active [ 238.798615][ T8323] batman_adv: batadv0: Adding interface: batadv_slave_1 [ 238.800509][ T8323] batman_adv: batadv0: The MTU of interface batadv_slave_1 is too small (1500) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to 1560 would solve the problem. [ 238.807767][ T8323] batman_adv: batadv0: Not using interface batadv_slave_1 (retrying later): interface not active [ 238.888542][ T8320] hsr_slave_0: entered promiscuous mode [ 238.917631][ T8320] hsr_slave_1: entered promiscuous mode [ 238.956794][ T8320] debugfs: Directory 'hsr0' with parent 'hsr' already present! [ 238.958794][ T8320] Cannot create hsr debugfs directory [ 239.028624][ T8323] hsr_slave_0: entered promiscuous mode [ 239.087210][ T8323] hsr_slave_1: entered promiscuous mode [ 239.127901][ T8323] debugfs: Directory 'hsr0' with parent 'hsr' already present! 
[ 239.129974][ T8323] Cannot create hsr debugfs directory [ 239.326384][ T7949] veth0_vlan: entered promiscuous mode [ 239.369127][ T662] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 239.371252][ T662] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 239.377053][ T6407] Bluetooth: hci2: command tx timeout [ 239.394773][ T7949] veth1_vlan: entered promiscuous mode [ 239.501497][ T8320] netdevsim netdevsim0 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 239.534729][ T8143] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 239.547158][ T8143] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 239.608396][ T8320] netdevsim netdevsim0 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 239.615974][ T7949] veth0_macvtap: entered promiscuous mode [ 239.986796][ T6407] Bluetooth: hci4: command tx timeout [ 240.570411][ T8320] netdevsim netdevsim0 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 240.578585][ T8363] netlink: 32 bytes leftover after parsing attributes in process `syz.2.279'. [ 240.602345][ T7949] veth1_macvtap: entered promiscuous mode [ 240.673929][ T43] netdevsim netdevsim3 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 240.752151][ T8320] netdevsim netdevsim0 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 240.817036][ T6407] Bluetooth: hci5: command tx timeout [ 240.898690][ T43] netdevsim netdevsim3 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 240.908096][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 240.911069][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 240.916797][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 240.919980][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 240.922619][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 240.925453][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 240.928567][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 240.931331][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 240.935141][ T7949] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 240.988781][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 240.991581][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 240.994290][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 241.006236][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 241.016449][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 241.019042][ T8370] loop2: detected capacity change from 0 to 512 [ 241.027267][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 241.030099][ T7949] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 241.033048][ T7949] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 241.039994][ T7949] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 241.120796][ T43] netdevsim netdevsim3 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 241.129650][ T7949] netdevsim netdevsim4 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 241.132143][ T7949] netdevsim netdevsim4 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 241.134544][ T7949] netdevsim netdevsim4 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 241.138158][ T7949] netdevsim netdevsim4 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 241.258631][ T43] netdevsim netdevsim3 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 241.725119][ T8316] netdevsim netdevsim1 netdevsim3 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 241.852399][ T8320] netdevsim netdevsim0 netdevsim0: renamed from eth0 [ 241.942028][ T8320] netdevsim netdevsim0 netdevsim1: renamed from eth1 [ 242.118762][ T8316] netdevsim netdevsim1 netdevsim2 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 242.123804][ T8320] netdevsim netdevsim0 netdevsim2: renamed from eth2 [ 242.131761][ T8320] netdevsim netdevsim0 netdevsim3: renamed from eth3 [ 242.165565][ T8320] 8021q: adding VLAN 0 to HW filter on device bond0 [ 242.173666][ T8320] 8021q: adding VLAN 0 to HW filter on device team0 [ 242.190661][ T8320] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 242.193742][ T8320] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 242.206798][ T303] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 242.214752][ T303] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 242.234996][ T8143] bridge0: port 1(bridge_slave_0) entered blocking state [ 242.237087][ T8143] bridge0: port 1(bridge_slave_0) entered forwarding state [ 242.244790][ T8143] bridge0: port 2(bridge_slave_1) entered blocking state [ 242.246840][ T8143] bridge0: port 2(bridge_slave_1) entered forwarding state [ 242.276300][ T229] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 242.280031][ T229] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 242.442235][ T8316] netdevsim netdevsim1 netdevsim1 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 242.898171][ T6407] Bluetooth: hci5: command tx timeout [ 242.911231][ T8316] netdevsim netdevsim1 netdevsim0 (unregistering): unset [1, 0] type 2 family 0 port 6081 - 0 [ 243.018481][ T8320] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 243.042223][ T43] bridge_slave_1: left allmulticast mode [ 243.043717][ T43] bridge_slave_1: left promiscuous mode [ 243.045332][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 243.083458][ T43] bridge_slave_0: left allmulticast mode [ 243.085055][ T43] bridge_slave_0: left promiscuous mode [ 243.088291][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 243.090294][ T8393] loop4: detected capacity change from 0 to 512 [ 243.103799][ T8393] EXT4-fs error (device loop4): ext4_orphan_get:1391: inode #15: comm syz.4.231: casefold flag without casefold feature [ 243.104079][ T43] bridge_slave_1: left allmulticast mode [ 243.109529][ T8393] EXT4-fs error (device loop4): ext4_orphan_get:1396: comm syz.4.231: couldn't read orphan inode 15 (err -117) [ 243.112276][ T43] bridge_slave_1: left promiscuous mode [ 243.113328][ T8393] EXT4-fs (loop4): mounted filesystem 00000000-0000-0000-0000-000000000000 r/w without journal. 
Quota mode: writeback. [ 243.114780][ T43] bridge0: port 2(bridge_slave_1) entered disabled state [ 243.121617][ T43] bridge_slave_0: left allmulticast mode [ 243.123208][ T43] bridge_slave_0: left promiscuous mode [ 243.124626][ T43] bridge0: port 1(bridge_slave_0) entered disabled state [ 243.938476][ T7949] EXT4-fs (loop4): unmounting filesystem 00000000-0000-0000-0000-000000000000. [ 246.039847][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 246.079467][ T43] bond0 (unregistering): Released all slaves [ 246.820277][ T43] bond0 (unregistering): (slave bond_slave_0): Releasing backup interface [ 246.859294][ T43] bond0 (unregistering): (slave bond_slave_1): Releasing backup interface [ 246.899151][ T43] bond0 (unregistering): Released all slaves [ 246.930017][ T8425] netlink: 8 bytes leftover after parsing attributes in process `syz.4.286'. [ 247.000109][ T8320] veth0_vlan: entered promiscuous mode [ 247.004983][ T8320] veth1_vlan: entered promiscuous mode [ 247.056154][ T8320] veth0_macvtap: entered promiscuous mode [ 247.089026][ T8320] veth1_macvtap: entered promiscuous mode [ 247.095768][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 247.107470][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.110565][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 247.113304][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.115961][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 247.125480][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.128421][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 247.131163][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.133757][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 247.138378][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.142157][ T8320] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 247.172536][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 247.175258][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.185377][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 247.192649][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.196865][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 247.200632][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.203217][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 247.205860][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 247.226709][ T8320] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 247.229630][ T8320] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 247.238248][ T8320] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 247.927459][ T8316] netdevsim netdevsim1 netdevsim0: renamed from eth0 [ 247.960025][ T8444] tipc: Started in network mode [ 247.961348][ T8444] tipc: Node identity ffffffff, cluster identity 4711 [ 247.963168][ T8444] tipc: Node number set to 4294967295 [ 247.968637][ T8447] mmap: syz.4.289 (8447): VmData 37490688 exceed data ulimit 0. Update limits or use boot option ignore_rlimit_data. [ 247.973981][ T8320] netdevsim netdevsim0 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 247.984368][ T8320] netdevsim netdevsim0 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 247.996778][ T8320] netdevsim netdevsim0 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 247.999462][ T8320] netdevsim netdevsim0 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 248.007179][ T8316] netdevsim netdevsim1 netdevsim1: renamed from eth1 [ 248.040118][ T8316] netdevsim netdevsim1 netdevsim2: renamed from eth2 [ 248.043813][ T8316] netdevsim netdevsim1 netdevsim3: renamed from eth3 [ 248.543508][ T8453] binder: 8452:8453 tried to acquire reference to desc 0, got 1 instead [ 248.618932][ T8453] binder: 8452:8453 got reply transaction with bad transaction stack, transaction 48 has target 8452:0 [ 248.670408][ T8453] binder: 8452:8453 transaction reply to 0:0 failed 49/29201/-71, size 0-0 line 3060 [ 248.713060][ T6399] binder: send failed reply for transaction 48 to 8452:8453 [ 248.735342][ T30] audit: type=1326 audit(248.710:5): auid=4294967295 uid=0 gid=0 ses=4294967295 subj=_ pid=8457 comm="syz.2.293" exe="/root/syz-executor" sig=31 arch=c00000b7 syscall=98 compat=0 ip=0xffffb2145ee8 code=0x0 [ 248.761559][ T6399] binder: undelivered TRANSACTION_ERROR: 29189 [ 248.819263][ T2335] ieee802154 phy0 wpan0: encryption failed: -22 [ 248.821083][ T2335] ieee802154 phy1 wpan1: encryption failed: -22 [ 249.818882][ T8464] netlink: 'syz.4.294': attribute type 12 has an invalid length. [ 249.821044][ T8464] netlink: 197276 bytes leftover after parsing attributes in process `syz.4.294'. 
[ 249.891799][ T8465] loop4: detected capacity change from 0 to 40427 [ 249.979396][ T8465] F2FS-fs (loop4): Found nat_bits in checkpoint [ 249.994714][ T8465] F2FS-fs (loop4): Mounted with checkpoint version = 48b305e5 [ 250.025439][ T662] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 250.042926][ T662] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 250.084235][ T44] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 250.093652][ T44] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 250.713989][ T7949] syz-executor: attempt to access beyond end of device [ 250.713989][ T7949] loop4: rw=2049, sector=45096, nr_sectors = 8 limit=40427 [ 250.729945][ T7949] F2FS-fs (loop4): Stopped filesystem due to reason: 3 [ 250.783211][ T43] hsr_slave_0: left promiscuous mode [ 250.825075][ T43] hsr_slave_1: left promiscuous mode [ 250.897654][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 250.899741][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 250.911090][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 250.912995][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 250.938040][ T43] hsr_slave_0: left promiscuous mode [ 250.950901][ T8481] loop4: detected capacity change from 0 to 64 [ 250.969708][ T43] hsr_slave_1: left promiscuous mode [ 251.037281][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_0 [ 251.039439][ T43] batman_adv: batadv0: Removing interface: batadv_slave_0 [ 251.045211][ T43] batman_adv: batadv0: Interface deactivated: batadv_slave_1 [ 251.054781][ T43] batman_adv: batadv0: Removing interface: batadv_slave_1 [ 251.069790][ T8481] block nbd4: shutting down sockets [ 251.092346][ T43] veth1_macvtap: left promiscuous mode [ 251.093916][ T43] veth0_macvtap: left promiscuous mode [ 251.095542][ T43] veth1_vlan: left promiscuous mode [ 251.097645][ T43] veth0_vlan: left promiscuous mode [ 251.101160][ T43] veth1_macvtap: left promiscuous mode [ 251.103114][ T43] veth0_macvtap: left promiscuous mode [ 251.104805][ T43] veth1_vlan: left promiscuous mode [ 251.106371][ T43] veth0_vlan: left promiscuous mode [ 251.184643][ T8491] loop4: detected capacity change from 0 to 24 [ 251.188876][ T8491] MTD: Attempt to mount non-MTD device "/dev/loop4" [ 251.206086][ T8491] romfs: Mounting image 'rom 637cf1fa' through the block layer [ 251.228089][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.287638][ T8494] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.291386][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.293679][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.296005][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.301234][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.303586][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.305895][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.317555][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.320195][ T8491] VFS: Lookup of 'file0' in romfs loop4 would have caused loop [ 251.385066][ T8496] UDC core: USB Raw Gadget: couldn't find an available UDC or it's busy [ 251.418268][ T8496] misc raw-gadget: fail, usb_gadget_register_driver returned -16 [ 251.976423][ T8498] loop4: detected capacity change from 0 to 1024 [ 251.988475][ T8498] BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low! 
[ 251.989935][ T8498] turning off the locking correctness validator.
[ 251.991592][ T8498] CPU: 0 UID: 0 PID: 8498 Comm: syz.4.300 Not tainted 6.11.0-rc5-syzkaller-gdf54f4a16f82 #0
[ 251.994352][ T8498] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 06/27/2024
[ 251.997217][ T8498] Call trace:
[ 251.998159][ T8498] dump_backtrace+0x1b8/0x1e4
[ 251.999367][ T8498] show_stack+0x2c/0x3c
[ 252.000647][ T8498] dump_stack_lvl+0xe4/0x150
[ 252.001865][ T8498] dump_stack+0x1c/0x28
[ 252.002982][ T8498] __lock_acquire+0x1fa0/0x779c
[ 252.004359][ T8498] lock_acquire+0x240/0x728
[ 252.005578][ T8498] psi_group_change+0x264/0x11fc
[ 252.006941][ T8498] psi_task_change+0x100/0x234
[ 252.008215][ T8498] enqueue_task+0x1b8/0x1d8
[ 252.009447][ T8498] ttwu_do_activate+0x194/0x69c
[ 252.010889][ T8498] try_to_wake_up+0x52c/0xf2c
[ 252.012160][ T8498] wake_up_process+0x18/0x24
[ 252.013446][ T8498] kick_pool+0x3f0/0x59c
[ 252.014574][ T8498] __queue_work+0xe28/0x1308
[ 252.015833][ T8498] queue_work_on+0xe0/0x1a0
[ 252.017098][ T8498] loop_queue_rq+0x974/0xadc
[ 252.018382][ T8498] blk_mq_request_issue_directly+0x31c/0x568
[ 252.020082][ T8498] blk_mq_plug_issue_direct+0x200/0x4ec
[ 252.021579][ T8498] blk_mq_flush_plug_list+0x648/0x1348
[ 252.023103][ T8498] __blk_flush_plug+0x388/0x460
[ 252.024434][ T8498] __submit_bio+0x39c/0x4c4
[ 252.025703][ T8498] submit_bio_noacct_nocheck+0x3ac/0xca8
[ 252.027275][ T8498] submit_bio_noacct+0xc90/0x165c
[ 252.028595][ T8498] submit_bio+0x374/0x564
[ 252.029721][ T8498] submit_bh_wbc+0x3f8/0x4c8
[ 252.031031][ T8498] block_read_full_folio+0xa2c/0xbe0
[ 252.032525][ T8498] hfsplus_read_folio+0x28/0x38
[ 252.033813][ T8498] filemap_read_folio+0x14c/0x39c
[ 252.035227][ T8498] do_read_cache_folio+0x114/0x548
[ 252.036554][ T8498] read_cache_page+0x6c/0x15c
[ 252.037835][ T8498] __hfs_bnode_create+0x3dc/0x6d4
[ 252.039303][ T8498] hfsplus_bnode_find+0x200/0xee4
[ 252.040828][ T8498] hfsplus_brec_find+0x134/0x4a0
[ 252.042287][ T8498] hfsplus_find_attr+0x1e0/0x33c
[ 252.043645][ T8498] hfsplus_attr_exists+0x154/0x1c8
[ 252.045158][ T8498] __hfsplus_setxattr+0x380/0x1cf4
[ 252.046593][ T8498] hfsplus_initxattrs+0x150/0x20c
[ 252.047974][ T8498] security_inode_init_security+0x210/0x3fc
[ 252.049626][ T8498] hfsplus_init_security+0x40/0x54
[ 252.051057][ T8498] hfsplus_fill_super+0x1010/0x166c
[ 252.052556][ T8498] mount_bdev+0x1d4/0x2a0
[ 252.053738][ T8498] hfsplus_mount+0x44/0x58
[ 252.054998][ T8498] legacy_get_tree+0xd4/0x16c
[ 252.056310][ T8498] vfs_get_tree+0x90/0x28c
[ 252.057512][ T8498] do_new_mount+0x278/0x900
[ 252.058734][ T8498] path_mount+0x590/0xe04
[ 252.060005][ T8498] __arm64_sys_mount+0x45c/0x5a8
[ 252.061392][ T8498] invoke_syscall+0x98/0x2b8
[ 252.062688][ T8498] el0_svc_common+0x130/0x23c
[ 252.063991][ T8498] do_el0_svc+0x48/0x58
[ 252.065158][ T8498] el0_svc+0x54/0x168
[ 252.066303][ T8498] el0t_64_sync_handler+0x84/0xfc
[ 252.067727][ T8498] el0t_64_sync+0x190/0x194
[ 252.806058][ T229] hfsplus: b-tree write err: -5, ino 4
[ 253.188042][ T43] team0 (unregistering): Port device team_slave_1 removed
[ 253.387559][ T43] team0 (unregistering): Port device team_slave_0 removed
[ 257.617552][ T43] team0 (unregistering): Port device team_slave_1 removed
[ 257.818399][ T43] team0 (unregistering): Port device team_slave_0 removed
[ 260.106607][ T8323] netdevsim netdevsim3 netdevsim0: renamed from eth0
[ 260.123930][ T8316] 8021q: adding VLAN 0 to HW filter on device bond0
[ 260.137147][ T8323] netdevsim netdevsim3
netdevsim1: renamed from eth1 [ 260.142135][ T8323] netdevsim netdevsim3 netdevsim2: renamed from eth2 [ 260.149691][ T8316] 8021q: adding VLAN 0 to HW filter on device team0 [ 260.155624][ T229] bridge0: port 1(bridge_slave_0) entered blocking state [ 260.157753][ T229] bridge0: port 1(bridge_slave_0) entered forwarding state [ 260.168334][ T229] bridge0: port 2(bridge_slave_1) entered blocking state [ 260.170337][ T229] bridge0: port 2(bridge_slave_1) entered forwarding state [ 260.172989][ T8323] netdevsim netdevsim3 netdevsim3: renamed from eth3 [ 260.205854][ T8316] hsr0: Slave A (hsr_slave_0) is not up; please bring it up to get a fully working HSR network [ 260.209248][ T8316] hsr0: Slave B (hsr_slave_1) is not up; please bring it up to get a fully working HSR network [ 260.245301][ T8323] 8021q: adding VLAN 0 to HW filter on device bond0 [ 260.264933][ T8323] 8021q: adding VLAN 0 to HW filter on device team0 [ 260.280342][ T8143] bridge0: port 1(bridge_slave_0) entered blocking state [ 260.282544][ T8143] bridge0: port 1(bridge_slave_0) entered forwarding state [ 260.308455][ T8143] bridge0: port 2(bridge_slave_1) entered blocking state [ 260.310373][ T8143] bridge0: port 2(bridge_slave_1) entered forwarding state [ 260.335192][ T8316] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 260.383695][ T8316] veth0_vlan: entered promiscuous mode [ 260.389680][ T8316] veth1_vlan: entered promiscuous mode [ 260.407668][ T8316] veth0_macvtap: entered promiscuous mode [ 260.425290][ T8316] veth1_macvtap: entered promiscuous mode [ 260.435892][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.443471][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.449527][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.454716][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.462647][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.468956][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.474006][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.486843][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.490383][ T8316] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 260.493739][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.505688][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.508566][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.511430][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.517495][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.524178][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.529961][ T8316] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.535600][ T8316] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 260.541418][ T8316] batman_adv: batadv0: Interface activated: batadv_slave_1 [ 260.552146][ T8323] 8021q: adding VLAN 0 to HW filter on device batadv0 [ 260.571739][ T8316] netdevsim netdevsim1 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0 [ 260.574280][ T8316] netdevsim netdevsim1 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0 [ 260.578243][ T8316] netdevsim netdevsim1 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0 [ 260.580731][ T8316] netdevsim netdevsim1 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0 [ 260.623464][ T8323] veth0_vlan: entered promiscuous mode [ 260.636213][ T8316] ieee80211 phy48: Selected rate control algorithm 'minstrel_ht' [ 260.648088][ T8323] veth1_vlan: entered promiscuous mode [ 260.656279][ T8323] veth0_macvtap: entered promiscuous mode [ 260.667204][ T8316] ieee80211 phy49: Selected rate control algorithm 'minstrel_ht' [ 260.668457][ T44] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 260.671439][ T44] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 260.675771][ T8323] veth1_macvtap: entered promiscuous mode [ 260.692445][ T8143] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50 [ 260.696608][ T8143] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50 [ 260.703733][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.707024][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.709915][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.712883][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.715624][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.719973][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.722794][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.728209][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.731133][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3e) already exists on: batadv_slave_0 [ 260.734133][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.738212][ T8323] batman_adv: batadv0: Interface activated: batadv_slave_0 [ 260.748390][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.752565][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.755342][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.762562][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.765444][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.769975][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! [ 260.772691][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1 [ 260.775626][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems! 
[ 260.779955][ T8323] batman_adv: The newly added mac address (aa:aa:aa:aa:aa:3f) already exists on: batadv_slave_1
[ 260.782840][ T8323] batman_adv: It is strongly recommended to keep mac addresses unique to avoid problems!
[ 260.786408][ T8323] batman_adv: batadv0: Interface activated: batadv_slave_1
[ 260.790652][ T8323] netdevsim netdevsim3 netdevsim0: set [1, 0] type 2 family 0 port 6081 - 0
[ 260.793161][ T8323] netdevsim netdevsim3 netdevsim1: set [1, 0] type 2 family 0 port 6081 - 0
[ 260.795617][ T8323] netdevsim netdevsim3 netdevsim2: set [1, 0] type 2 family 0 port 6081 - 0
[ 260.802355][ T8323] netdevsim netdevsim3 netdevsim3: set [1, 0] type 2 family 0 port 6081 - 0
[ 260.890537][ T8323] ieee80211 phy50: Selected rate control algorithm 'minstrel_ht'
[ 260.927221][ T11] wlan0: Created IBSS using preconfigured BSSID 50:50:50:50:50:50
[ 260.930250][ T8323] ieee80211 phy51: Selected rate control algorithm 'minstrel_ht'
[ 260.934336][ T11] wlan0: Creating new IBSS network, BSSID 50:50:50:50:50:50
[ 260.967884][ T8143] wlan1: Created IBSS using preconfigured BSSID 50:50:50:50:50:50
[ 260.971698][ T8143] wlan1: Creating new IBSS network, BSSID 50:50:50:50:50:50
VM DIAGNOSIS:
Warning: Permanently added '10.128.1.59' (ED25519) to the list of known hosts.
lock-classes: 3197 [max: 8192]
direct dependencies: 39058 [max: 131072]
indirect dependencies: 262560
all direct dependencies: 1075354
dependency chains: 64382 [max: 65536]
dependency chain hlocks used: 315710 [max: 327680]
dependency chain hlocks lost: 0
in-hardirq chains: 166
in-softirq chains: 2290
in-process chains: 61890
stack-trace entries: 478868 [max: 1048576]
number of stack traces: 21419
number of stack hash chains: 11910
combined max dependencies:
hardirq-safe locks: 73
hardirq-unsafe locks: 2334
softirq-safe locks: 363
softirq-unsafe locks: 1869
irq-safe locks: 371
irq-unsafe locks: 2334
hardirq-read-safe locks: 5
hardirq-read-unsafe locks: 308
softirq-read-safe locks: 24
softirq-read-unsafe locks: 290
irq-read-safe locks: 24
irq-read-unsafe locks: 308
uncategorized locks: 486
unused locks: 0
max locking depth: 19
max bfs queue depth: 947
max lock class index: 3421
debug_locks: 0
zapped classes: 3645
zapped lock chains: 35584
large chain blocks: 0
all lock classes:
FD: 1 BD: 504 -.-.: (console_sem).lock FD: 493 BD: 34 +.+.: console_lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->kbd_event_lock ->(console_sem).lock ->console_owner_lock ->fs_reclaim ->&x->wait#8 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#9 ->&fb_info->lock ->vt_event_lock ->&base->lock ->subsys mutex#5 ->&helper->lock ->&helper->damage_lock ->&rq->__lock ->req_lock ->&p->pi_lock ->&x->wait#11 ->remove_cache_srcu ->subsys mutex#23 ->&n->list_lock FD: 1 BD: 1 ....: console_srcu FD: 30 BD: 1 +.+.: fill_pool_map-wait-type-override ->&____s->seqcount ->&c->lock ->pool_lock#2 ->pool_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&zone->lock ->&____s->seqcount#2 ->rcu_node_0 FD: 2 BD: 1775 -.-.: &obj_hash[i].lock ->pool_lock FD: 1 BD: 1771 -.-.: pool_lock FD: 1095 BD: 17 +.+.: cgroup_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->cgroup_file_kn_lock ->css_set_lock ->&c->lock ->&____s->seqcount ->blkcg_pol_mutex ->percpu_counters_lock ->shrinker_mutex ->&base->lock
->memcg_idr_lock ->devcgroup_mutex ->cpu_hotplug_lock ->fs_reclaim ->&x->wait#2 ->&rq->__lock ->cgroup_mutex.wait_lock ->&n->list_lock ->cpuset_mutex ->&dom->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->batched_entropy_u32.lock ->cgroup_idr_lock ->task_group_lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex ->&____s->seqcount#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->stock_lock FD: 78 BD: 1 +.+.: fixmap_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 FD: 316 BD: 138 ++++: cpu_hotplug_lock ->jump_label_mutex ->cpuhp_state_mutex ->freezer_mutex ->&rq->__lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->smpboot_threads_lock ->&obj_hash[i].lock ->&pool->lock ->&x->wait#4 ->mem_hotplug_lock ->mem_hotplug_lock.waiters.lock ->mem_hotplug_lock.rss.gp_wait.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->rcu_node_0 ->cpu_hotplug_lock.waiters.lock ->&swhash->hlist_mutex ->pmus_lock ->pcp_batch_high_lock ->&xa->xa_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_attach_mutex ->pcpu_alloc_mutex ->relay_channels_mutex ->&rnp->kthread_mutex ->tmigr_mutex ->sparse_irq_lock ->&x->wait#5 ->cpuhp_state-up ->stop_cpus_mutex ->&stop_pi_lock ->wq_pool_mutex ->flush_lock ->devtree_lock ->&x->wait#8 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#11 ->dev_pm_qos_mtx ->dev_pm_qos_sysfs_mtx ->(console_sem).lock ->xps_map_mutex ->css_set_lock ->cpuset_mutex ->cgroup_threadgroup_rwsem ->cgroup_threadgroup_rwsem.waiters.lock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&list->lock#12 ->slab_mutex ->&cfs_rq->removed.lock ->&x->wait#10 FD: 32 BD: 144 +.+.: jump_label_mutex ->patch_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 496 BD: 30 +.+.: console_mutex ->(console_sem).lock ->console_lock ->&port_lock_key ->syslog_lock ->&rq->__lock ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 1 BD: 232 ..-.: input_pool.lock FD: 1 BD: 1708 ..-.: base_crng.lock FD: 1 BD: 1 ....: rcu_read_lock FD: 1 BD: 1 ....: crng_init_wait.lock FD: 1 BD: 1 ....: early_pfn_lock FD: 1 BD: 149 ....: devtree_lock FD: 1 BD: 1 ....: rcu_read_lock_sched FD: 10 BD: 142 ++++: resource_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 1 BD: 1 ....: restart_handler_list.lock FD: 1 BD: 1 +.+.: system_transition_mutex FD: 2 BD: 791 -.-.: pcpu_lock ->stock_lock FD: 1 BD: 1 ....: debug_hook_lock FD: 2 BD: 1 ....: zonelist_update_seq ->zonelist_update_seq.seqcount FD: 1 BD: 2 ....: zonelist_update_seq.seqcount FD: 228 BD: 139 +.+.: cpuhp_state_mutex ->cpuhp_state-down ->cpuhp_state-up ->resource_lock ->pool_lock#2 ->(console_sem).lock ->clockevents_lock ->&irq_desc_lock_class ->tmigr_mutex ->&tmc->lock ->&p->pi_lock ->&x->wait#5 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->crypto_alg_sem ->scomp_lock FD: 2 BD: 1301 -.-.: &zone->lock ->&____s->seqcount FD: 1 BD: 1693 .-.-: &____s->seqcount FD: 3 BD: 71 +.+.: &pcp->lock ->&zone->lock FD: 1 BD: 1798 -.-.: pool_lock#2 FD: 82 BD: 274 +.+.: pcpu_alloc_mutex ->pcpu_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&cfs_rq->removed.lock 
->&____s->seqcount#2 ->&n->list_lock ->pcpu_alloc_mutex.wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 6 BD: 1604 -.-.: &n->list_lock ->&c->lock FD: 5 BD: 1713 -.-.: &c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 206 BD: 143 +.+.: slab_mutex ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&____s->seqcount ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->lock ->&root->kernfs_rwsem ->&k->list_lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->shrink_qlist.lock ->quarantine_lock ->&ACCESS_PRIVATE(sdp, lock) ->remove_cache_srcu ->rcu_node_0 ->&rcu_state.expedited_wq ->&x->wait#9 ->flush_lock ->&sb->s_type->i_lock_key#8 ->rename_lock.seqcount ->&sb->s_type->i_mutex_key#3 ->&dentry->d_lock FD: 79 BD: 100 +.+.: shrinker_mutex ->pool_lock#2 ->fs_reclaim ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount ->&rq->__lock FD: 1 BD: 151 ....: patch_lock FD: 114 BD: 3 +.+.: trace_types_lock ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 FD: 1 BD: 2 ....: panic_notifier_list.lock FD: 1 BD: 1 ....: die_chain.lock FD: 80 BD: 4 +.+.: trace_event_sem ->trace_event_ida.xa_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->eventfs_mutex ->&c->lock ->&____s->seqcount FD: 3 BD: 341 ..-.: batched_entropy_u32.lock ->crngs.lock FD: 2 BD: 1706 ..-.: crngs.lock ->base_crng.lock FD: 20 BD: 750 +.+.: sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 27 BD: 1551 -.-.: &rq->__lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->&rt_b->rt_runtime_lock ->&cp->lock ->&rt_rq->rt_runtime_lock ->pool_lock#2 ->cpu_asid_lock ->&rq->__lock/1 ->&rd->rto_lock FD: 1 BD: 1552 ....: &cfs_b->lock FD: 28 BD: 1 ....: init_task.pi_lock ->&rq->__lock FD: 1 BD: 1 ....: init_task.vtime_seqcount FD: 86 BD: 149 +.+.: wq_pool_mutex ->&____s->seqcount ->&c->lock ->pool_lock#2 ->pcpu_alloc_mutex ->&wq->mutex ->&obj_hash[i].lock ->fs_reclaim ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->hrtimer_bases.lock ->wq_pool_attach_mutex ->&pool->lock ->wq_pool_mutex.wait_lock ->&xa->xa_lock ->&cfs_rq->removed.lock ->pool_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&base->lock ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu FD: 35 BD: 251 +.+.: &wq->mutex ->&pool->lock ->&x->wait#10 ->&rq->__lock FD: 1 BD: 957 -.-.: rcu_node_0 FD: 6 BD: 70 -.-.: rcu_state.barrier_lock ->rcu_node_0 ->&obj_hash[i].lock FD: 35 BD: 3 ....: &rnp->exp_poll_lock FD: 34 BD: 1164 -.-.: &pool->lock ->&obj_hash[i].lock ->pool_lock#2 ->&p->pi_lock ->(worker)->lock ->wq_mayday_lock ->&base->lock ->&nna->lock ->&x->wait#10 FD: 9 BD: 5 ....: trace_event_ida.xa_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 FD: 1 BD: 1 ....: trigger_cmd_mutex FD: 4 BD: 363 +.+.: free_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 351 +.+.: &vn->busy.lock FD: 329 BD: 1 ....: acpi_probe_mutex ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->resource_lock ->cpu_hotplug_lock ->(console_sem).lock ->irq_domain_mutex ->pcpu_alloc_mutex ->&domain->mutex ->&desc->request_mutex ->&irq_desc_lock_class ->cpu_pm_notifier.lock ->&obj_hash[i].lock ->&vn->lazy.lock ->iort_msi_chip_lock ->its_lock ->efi_mem_reserve_persistent_lock ->lpi_range_lock ->syscore_ops_lock ->clocksource_mutex FD: 5 BD: 360 +.+.: init_mm.page_table_lock ->&obj_hash[i].lock FD: 78 BD: 7 +.+.: irq_domain_mutex ->pool_lock#2 ->fs_reclaim ->&obj_hash[i].lock FD: 182 BD: 7 
+.+.: &domain->mutex ->sparse_irq_lock ->pool_lock#2 ->&irq_desc_lock_class ->&____s->seqcount ->fs_reclaim ->&its->dev_alloc_lock ->&c->lock FD: 179 BD: 144 +.+.: sparse_irq_lock ->pool_lock#2 ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->(cpu_running).wait.lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&x->wait#5 ->&p->pi_lock ->&irq_desc_lock_class ->fs_reclaim ->lock ->&root->kernfs_rwsem ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&cfs_rq->removed.lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&sem->wait_lock FD: 8 BD: 161 -.-.: &irq_desc_lock_class ->irq_controller_lock ->mask_lock ->&gic_data_rdist()->rd_lock ->irq_resend_lock ->&its->lock ->tmp_mask_lock FD: 15 BD: 34 +.+.: &desc->request_mutex ->&irq_desc_lock_class ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 2 ....: cpu_pm_notifier.lock FD: 1 BD: 192 +.+.: &vn->lazy.lock FD: 1 BD: 3 +.+.: iort_msi_chip_lock FD: 2 BD: 2 ....: its_lock ->&its->lock FD: 1 BD: 2 ....: efi_mem_reserve_persistent_lock FD: 5 BD: 9 +.+.: lpi_range_lock ->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 2 +.+.: syscore_ops_lock FD: 1 BD: 166 ....: &its->lock FD: 1 BD: 140 +.+.: cpuhp_state-down FD: 217 BD: 140 +.+.: cpuhp_state-up ->smpboot_threads_lock ->sparse_irq_lock ->&swhash->hlist_mutex ->pmus_lock ->&tmc->lock ->&x->wait#3 ->&obj_hash[i].lock ->hrtimer_bases.lock ->wq_pool_mutex ->rcu_node_0 ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&rnp->kthread_mutex ->resource_lock ->&rq->__lock ->lock ->&root->kernfs_rwsem ->batched_entropy_u8.lock ->kfence_freelist_lock ->semaphore->lock ->thermal_cdev_ida.xa_lock ->cpufreq_driver_lock ->&x->wait#8 ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#31 ->thermal_list_lock ->*(&acpi_gbl_reference_count_lock) ->&k->k_lock ->swap_slots_cache_mutex ->&n->list_lock FD: 4 BD: 1 -.-.: timekeeper_lock ->tk_core.seq.seqcount FD: 3 BD: 1629 ----: tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 162 ....: irq_controller_lock FD: 7 BD: 140 ....: clockevents_lock ->tk_core.seq.seqcount ->tick_broadcast_lock ->jiffies_seq.seqcount FD: 3 BD: 141 -...: tick_broadcast_lock ->jiffies_lock FD: 1 BD: 143 ----: jiffies_seq.seqcount FD: 317 BD: 2 +.+.: clocksource_mutex ->cpu_hotplug_lock ->(console_sem).lock FD: 13 BD: 1626 -.-.: &base->lock ->&obj_hash[i].lock ->&base->lock/1 ->&tmc->lock FD: 3 BD: 11 ....: batched_entropy_u64.lock ->crngs.lock FD: 180 BD: 141 +.+.: pmus_lock ->pcpu_alloc_mutex ->pool_lock#2 ->&obj_hash[i].lock ->&cpuctx_mutex ->fs_reclaim ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&x->wait#8 ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#28 FD: 1 BD: 141 +.+.: &swhash->hlist_mutex FD: 1 BD: 142 +.+.: &cpuctx_mutex FD: 1 BD: 14 ....: tty_ldiscs_lock FD: 30 BD: 35 ....: kbd_event_lock ->led_lock FD: 29 BD: 37 ..-.: led_lock ->&p->pi_lock FD: 1 BD: 498 ..-.: console_owner_lock FD: 5 BD: 122 ..-.: once_lock ->&obj_hash[i].lock ->crngs.lock FD: 45 BD: 3 +.+.: init_task.alloc_lock ->init_fs.lock FD: 80 BD: 2 +.+.: acpi_ioremap_lock ->pool_lock#2 ->fs_reclaim ->free_vmap_area_lock ->&vn->busy.lock FD: 1 BD: 159 ....: semaphore->lock FD: 1 BD: 158 +.+.: *(&acpi_gbl_reference_count_lock) FD: 13 
BD: 1604 -.-.: hrtimer_bases.lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 725 ..-.: percpu_counters_lock FD: 37 BD: 2 +.+.: tomoyo_policy_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 1198 BD: 4 ++++: pernet_ops_rwsem ->stack_depot_init_mutex ->crngs.lock ->net_rwsem ->proc_inum_ida.xa_lock ->pool_lock#2 ->proc_subdir_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->sysctl_lock ->pcpu_alloc_mutex ->net_generic_ids.xa_lock ->mmu_notifier_invalidate_range_start ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->nl_table_lock ->nl_table_wait.lock ->rtnl_mutex ->uevent_sock_mutex ->&net->rules_mod_lock ->slab_mutex ->batched_entropy_u32.lock ->pool_lock ->percpu_counters_lock ->&zone->lock ->k-slock-AF_INET/1 ->cache_list_lock ->tk_core.seq.seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->running_helpers_waitq.lock ->&rq->__lock ->&sn->pipefs_sb_lock ->krc.lock ->&s->s_inode_list_lock ->nf_hook_mutex ->cpu_hotplug_lock ->hwsim_netgroup_ida.xa_lock ->nf_ct_ecache_mutex ->nf_log_mutex ->ipvs->est_mutex ->&base->lock ->__ip_vs_app_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->&hashinfo->lock#2 ->&net->ipv6.ip6addrlbl_table.lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->wq_pool_mutex ->pcpu_lock ->&list->lock#4 ->&dir->lock#2 ->ptype_lock ->k-clock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&this->receive_lock ->once_lock ->nf_ct_proto_mutex ->k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->conn_lock ->&call->waitq ->&rx->call_lock ->&rxnet->call_lock ->&n->list_lock ->rcu_node_0 ->&rcu_state.gp_wq ->&cfs_rq->removed.lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rdma_nets.xa_lock ->devices_rwsem ->key ->&____s->seqcount#2 ->remove_cache_srcu ->&sem->wait_lock ->uevent_sock_mutex.wait_lock ->&net->nsid_lock ->ebt_mutex ->nf_nat_proto_mutex ->&xt[i].mutex ->&nft_net->commit_mutex ->netns_bpf_mutex ->rcu_state.barrier_mutex ->lweventlist_lock ->napi_hash_lock ->&wq->mutex ->wq_mayday_lock ->&x->wait ->netdev_unregistering_wq.lock ->(&net->fs_probe_timer) ->&net->cells_lock ->(&net->cells_timer) ->(&net->fs_timer) ->bit_wait_table + i ->(wq_completion)kafsd ->k-clock-AF_RXRPC ->&local->services_lock ->(wq_completion)krxrpcd ->rlock-AF_RXRPC ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&ent->pde_unload_lock ->ovs_mutex ->&srv->idr_lock ->&rnp->exp_wq[0] ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->&rnp->exp_wq[1] ->&tn->nametbl_lock ->&rnp->exp_wq[3] ->&ht->mutex ->(wq_completion)krdsd ->(&icsk->icsk_retransmit_timer) ->(&icsk->icsk_delack_timer) ->(&sk->sk_timer) ->rds_tcp_conn_lock ->&rcu_state.expedited_wq ->loop_conns_lock ->(wq_completion)l2tp ->(&rxnet->peer_keepalive_timer) ->(&rxnet->service_conn_reap_timer) ->&fn->fou_lock ->ipvs->sync_mutex ->hwsim_radio_lock ->pin_fs_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#8 ->mount_lock ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->&x->wait#2 ->&list->lock#16 ->&rdev->wiphy.mtx ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->&k->k_lock ->sysfs_symlink_target_lock ->subsys mutex#39 ->&x->wait#8 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->&rfkill->lock ->rfkill_global_mutex ->triggers_list_lock ->leds_list_lock ->&x->wait#10 ->subsys mutex#52 ->gdp_mutex ->(&local->sta_cleanup) ->rfkill_global_mutex.wait_lock ->rdma_nets_rwsem ->k-clock-AF_NETLINK ->&nlk->wait ->&wg->device_update_lock 
->&bat_priv->forw_bcast_list_lock ->&bat_priv->forw_bat_list_lock ->&bat_priv->gw.list_lock ->&bat_priv->bat_v.ogm_buff_mutex ->&bat_priv->tvlv.container_list_lock ->&bat_priv->tvlv.handler_list_lock ->key#16 ->key#17 ->&hash->list_locks[i] ->key#18 ->key#15 ->key#30 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.roam_list_lock ->key#35 ->&hn->hn_lock ->&pnettable->lock ->&pnetids_ndev->lock ->k-sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->&sn->gssp_lock ->&cd->hash_lock ->(&net->can.stattimer) ->&vn->busy.lock ->&vn->lazy.lock ->stock_lock ->&net->xfrm.xfrm_state_lock ->&sb->s_type->i_lock_key#24 ->rename_lock.seqcount ->ip6_fl_lock ->(&net->ipv6.ip6_fib_timer) ->__ip_vs_mutex ->(&ipvs->dest_trash_timer) ->recent_lock ->hashlimit_mutex ->nf_conntrack_mutex ->tcp_metrics_lock ->tcp_exit_batch_mutex ->k-clock-AF_INET ->&net->xfrm.xfrm_policy_lock ->&xa->xa_lock#4 ->genl_sk_destructing_waitq.lock ->&lock->wait_lock ->rcu_state.exp_mutex ->rcu_state.barrier_mutex.wait_lock ->&rdev->bss_lock ->quarantine_lock ->&meta->lock FD: 28 BD: 72 +.+.: stack_depot_init_mutex ->&rq->__lock FD: 35 BD: 91 ++++: net_rwsem ->&list->lock#2 ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&c->lock ->quarantine_lock ->&____s->seqcount ->&n->list_lock FD: 2 BD: 170 ..-.: proc_inum_ida.xa_lock ->pool_lock#2 FD: 1087 BD: 66 +.+.: rtnl_mutex ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&xa->xa_lock#4 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->nl_table_lock ->nl_table_wait.lock ->running_helpers_waitq.lock ->subsys mutex#19 ->&dir->lock#2 ->&zone->lock ->dev_hotplug_mutex ->input_pool.lock ->netdev_rename_lock.seqcount ->net_rwsem ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->krc.lock ->&rq->__lock ->stack_depot_init_mutex ->wq_pool_mutex ->crngs.lock ->&cfs_rq->removed.lock ->pool_lock ->lweventlist_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&k->k_lock ->param_lock ->(console_sem).lock ->&rdev->wiphy.mtx ->&base->lock ->uevent_sock_mutex ->subsys mutex#54 ->&sdata->sec_mtx ->&local->iflist_mtx#2 ->lock#7 ->failover_lock ->&tn->lock ->bh_lock ->rcu_node_0 ->&idev->mc_lock ->&ndev->lock ->&pnettable->lock ->smc_ib_devices.mutex ->&(&net->nexthop.notifier_chain)->rwsem ->reg_requests_lock ->reg_pending_beacons_lock ->devnet_rename_sem ->&rnp->exp_wq[1] ->quarantine_lock ->&ent->pde_unload_lock ->target_list_lock ->(inetaddr_validator_chain).rwsem ->(inetaddr_chain).rwsem ->&dev_addr_list_lock_key#4 ->netpoll_srcu ->&in_dev->mc_tomb_lock ->&im->lock ->fib_info_lock ->cbs_list_lock ->(inet6addr_validator_chain).rwsem ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&tb->tb6_lock ->&n->list_lock ->&dev_addr_list_lock_key ->rlock-AF_NETLINK ->napi_hash_lock ->lapb_list_lock ->x25_neigh_list_lock ->console_owner_lock ->console_owner ->&dev_addr_list_lock_key#2 ->_xmit_SLIP ->_xmit_ETHER ->&sem->wait_lock ->&p->pi_lock ->free_vmap_area_lock ->&vn->busy.lock ->init_mm.page_table_lock ->&cma->lock ->cma_mutex ->cpu_hotplug_lock ->&priv->adminq_lock ->rtnl_mutex.wait_lock ->&rfkill->lock ->remove_cache_srcu ->_xmit_VOID ->&dev_addr_list_lock_key#3 ->_xmit_X25 ->&lapbeth->up_lock ->&lapb->lock ->class ->(&tbl->proxy_timer) ->&ul->lock#2 ->&n->lock ->&dev->tx_global_lock ->&rnp->exp_wq[2] 
->&sch->root_lock_key ->&rnp->exp_wq[3] ->&sch->root_lock_key#2 ->&dir->lock ->&wpan_dev->association_lock ->dev_addr_sem ->_xmit_IEEE802154 ->&nr_netdev_addr_lock_key ->listen_lock ->&r->consumer_lock ->&mm->mmap_lock ->pcpu_lock ->(switchdev_blocking_notif_chain).rwsem ->&br->hash_lock ->nf_hook_mutex ->j1939_netdev_lock ->key ->percpu_counters_lock ->&bat_priv->tvlv.handler_list_lock ->&bat_priv->tvlv.container_list_lock ->&bat_priv->softif_vlan_list_lock ->key#15 ->&bat_priv->tt.changes_list_lock ->kernfs_idr_lock ->&rnp->exp_wq[0] ->noop_qdisc.q.lock ->tk_core.seq.seqcount ->init_lock ->&rcu_state.expedited_wq ->deferred_lock ->&dev_addr_list_lock_key#8 ->&br->lock ->&pn->hash_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&hard_iface->bat_iv.ogm_buff_mutex ->ptype_lock ->_xmit_NONE ->lock#9 ->&hsr->list_lock ->&x->wait#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->mount_lock ->&meta->lock ->&xa->xa_lock#18 ->&dev_addr_list_lock_key#10/1 ->req_lock ->&x->wait#11 ->subsys mutex#75 ->bpf_devs_lock ->&devlink_port->type_lock ->&vn->sock_lock ->page_pools_lock ->&wg->device_update_lock ->&dev_addr_list_lock_key#12 ->&dev_addr_list_lock_key#6/1 ->&dev_addr_list_lock_key#13 ->&dev_addr_list_lock_key#14 ->&dev_addr_list_lock_key#5 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#7/1 ->_xmit_ETHER/1 ->&rcu_state.gp_wq ->&nn->netlink_tap_lock ->&batadv_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#9/1 ->&macvlan_netdev_addr_lock_key/1 ->&ipvlan->addrs_lock ->&macsec_netdev_addr_lock_key/1 ->key#18 ->&bat_priv->tt.commit_lock ->&dev_addr_list_lock_key#11 ->mmu_notifier_invalidate_range_start ->k-slock-AF_INET/1 ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&ul->lock ->&____s->seqcount#2 ->&lock->wait_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->stock_lock ->&pn->all_ppp_mutex ->&ppp->rlock ->&ppp->wlock ->&hwstats->hwsdev_list_lock ->&net->xdp.lock ->mirred_list_lock ->&nft_net->commit_mutex ->&idev->mc_query_lock ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->&dev_addr_list_lock_key#15 ->&dev->ethtool->rss_lock ->&pf->rwait ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->__ip_vs_mutex ->flowtable_lock ->&tun->lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->rcu_state.exp_mutex.wait_lock ->&sb->s_type->i_lock_key#24 ->&dentry->d_lock ->rename_lock.seqcount ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&r->consumer_lock#3 ->&net->xfrm.xfrm_state_lock ->xfrm_state_dev_gc_lock ->&net->xfrm.xfrm_policy_lock ->&sb->s_type->i_lock_key#8 ->wq_pool_mutex.wait_lock ->(&pmctx->ip6_mc_router_timer) ->(&pmctx->ip4_mc_router_timer) ->&ht->mutex ->&br->multicast_lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#3 ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&table->hash[i].lock ->k-clock-AF_INET ->&sb->s_type->i_lock_key#9 ->k-clock-AF_INET6 ->(&br->hello_timer) ->(&br->topology_change_timer) ->(&br->tcn_timer) ->(&brmctx->ip4_mc_router_timer) ->(&brmctx->ip4_other_query.timer) ->(&brmctx->ip4_other_query.delay_timer) ->(&brmctx->ip4_own_query.timer) ->(&brmctx->ip6_mc_router_timer) ->(&brmctx->ip6_other_query.timer) ->(&brmctx->ip6_other_query.delay_timer) ->(&brmctx->ip6_own_query.timer) ->&pmc->lock ->(&mp->timer) ->rcu_state.barrier_mutex ->&app->lock#2 ->(&app->join_timer)#2 ->(&app->periodic_timer) ->&list->lock#11 ->(&app->join_timer) ->&app->lock ->&list->lock#10 ->&rdev->dev_wait ->&fq->lock ->(&hsr->prune_timer) ->(&hsr->prune_proxy_timer) ->(&hsr->announce_timer) 
->(&hsr->announce_proxy_timer) ->&bat_priv->forw_bcast_list_lock ->&bat_priv->forw_bat_list_lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#4 ->&r->consumer_lock#2 ->&wg->socket_update_lock ->raw_notifier_lock ->bcm_notifier_lock ->isotp_notifier_lock ->_xmit_NETROM#2 ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&caifn->caifdevs.lock ->&net->rules_mod_lock ->(&mrt->ipmr_expire_timer) ->netlbl_unlhsh_lock ->nr_list_lock ->nr_neigh_list_lock ->&bpq_netdev_addr_lock_key ->&bond->mode_lock ->reg_indoor_lock ->pcpu_alloc_mutex.wait_lock ->&sl->lock ->qdisc_mod_lock ->&block->lock ->&block->cb_lock ->&xa->xa_lock#26 ->&priv->active_session_list_lock ->&priv->j1939_socks_lock ->&priv->lock ->&chain->filter_chain_lock ->cls_mod_lock ->&tp->lock ->&head->masks_lock ->act_mod_lock ->&tn->idrinfo->lock ->&p->tcfa_lock ->flow_indr_block_lock ->key#38 ->&p->alloc_lock ->&newf->file_lock ->&sb->s_type->i_lock_key#16 ->bpf_dispatcher_xdp.mutex ->&block->proto_destroy_lock ->team->team_lock_key#17 ->&sch->root_lock_key#327 ->&sch->root_lock_key#328 ->&sch->root_lock_key#329 ->&sch->root_lock_key#330 ->&sch->root_lock_key#336 ->&sch->root_lock_key#337 ->&sch->root_lock_key#338 ->&sch->root_lock_key#339 ->&sch->root_lock_key#340 ->&sch->root_lock_key#341 ->ifalias_mutex ->&dev_addr_list_lock_key/2 ->sk_lock-AF_CAN ->slock-AF_CAN ->&sch->root_lock_key#724 ->acaddr_hash_lock ->&r->consumer_lock#4 ->team->team_lock_key#20 ->&sch->root_lock_key#734 ->&sch->root_lock_key#735 ->&sch->root_lock_key#736 ->&sch->root_lock_key#737 ->&sch->root_lock_key#742 ->&sch->root_lock_key#743 ->&sch->root_lock_key#744 ->&sch->root_lock_key#745 ->&sch->root_lock_key#746 ->&sch->root_lock_key#747 ->team->team_lock_key#21 ->&sch->root_lock_key#750 ->&sch->root_lock_key#751 ->&sch->root_lock_key#752 ->&sch->root_lock_key#753 ->&sch->root_lock_key#756 ->&sch->root_lock_key#757 ->&sch->root_lock_key#758 ->&sch->root_lock_key#759 ->&sch->root_lock_key#760 ->&sch->root_lock_key#761 ->&data->nh_lock ->&dev_addr_list_lock_key#16 ->&xs->mutex ->&batadv_netdev_addr_lock_key ->&sch->root_lock_key#762 ->&sch->root_lock_key#763 ->&sch->root_lock_key#764 ->&sch->root_lock_key#765 ->team->team_lock_key#22 ->team->team_lock_key#23 ->&sch->root_lock_key#766 ->&sch->root_lock_key#767 ->&sch->root_lock_key#768 ->&sch->root_lock_key#769 ->&sch->root_lock_key#770 ->&sch->root_lock_key#771 ->&sch->root_lock_key#772 ->&sch->root_lock_key#773 ->&sch->root_lock_key#774 ->&sch->root_lock_key#775 ->&sch->root_lock_key#776 ->&sch->root_lock_key#777 ->&sch->root_lock_key#778 ->&sch->root_lock_key#779 ->&sch->root_lock_key#780 ->&sch->root_lock_key#781 ->team->team_lock_key#24 ->&sch->root_lock_key#782 ->&sch->root_lock_key#783 ->&sch->root_lock_key#784 ->&sch->root_lock_key#785 ->&sch->root_lock_key#786 ->&sch->root_lock_key#787 ->&sch->root_lock_key#788 ->&sch->root_lock_key#789 ->&sch->root_lock_key#790 ->&sch->root_lock_key#791 ->&sch->root_lock_key#792 ->&sch->root_lock_key#793 ->&sch->root_lock_key#794 ->&sch->root_lock_key#795 ->&sch->root_lock_key#796 ->&sch->root_lock_key#797 ->&sch->root_lock_key#798 ->&sch->root_lock_key#799 ->&sch->root_lock_key#800 ->&sch->root_lock_key#801 ->&sch->root_lock_key#802 ->&sch->root_lock_key#803 ->&sch->root_lock_key#810 ->&sch->root_lock_key#811 ->&sch->root_lock_key#812 ->&macsec_netdev_addr_lock_key#2/2 ->&tn->nametbl_lock ->&ht->lock ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->sk_lock-AF_TIPC ->slock-AF_TIPC ->&tipc_net(net)->bclock FD: 96 BD: 398 +.+.: lock ->kernfs_idr_lock ->cgroup_idr_lock ->memcg_idr_lock 
->pidmap_lock ->drm_minor_lock ->&file_private->table_lock ->&q->queue_lock ->&group->inotify_data.idr_lock ->map_idr_lock ->&mp->m_perag_lock ->&ip->i_flags_lock ->&pag->pag_ici_lock ->&sb->s_type->i_lock_key#35 ->&fs_info->buffer_lock ->&fs_info->fs_roots_radix_lock ->sctp_assocs_id_lock ->&nm_i->nid_list_lock ->prog_idr_lock ->&clnt->lock ->&mru->lock ->link_idr_lock ->&im->ino_lock FD: 13 BD: 401 +.+.: kernfs_idr_lock ->pool_lock#2 ->&____s->seqcount ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 FD: 122 BD: 314 ++++: &root->kernfs_rwsem ->&root->kernfs_iattr_rwsem ->kernfs_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&sem->wait_lock ->&c->lock ->&____s->seqcount ->rcu_node_0 ->fs_reclaim ->mmu_notifier_invalidate_range_start ->inode_hash_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#25 ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->kernfs_rename_lock ->&p->pi_lock ->&sb->s_type->i_lock_key#31 ->&sb->s_type->i_lock_key#32 ->&____s->seqcount#2 ->&xa->xa_lock#5 ->stock_lock ->&n->list_lock ->&rcu_state.expedited_wq ->&rcu_state.gp_wq ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 4 ++++: file_systems_lock FD: 79 BD: 318 ++++: &root->kernfs_iattr_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->iattr_mutex ->&sem->wait_lock ->tk_core.seq.seqcount FD: 5 BD: 184 +.+.: dq_list_lock ->dq_state_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 10 BD: 66 +.+.: sb_lock ->unnamed_dev_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 121 BD: 2 +.+.: &type->s_umount_key/1 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_lock_key ->&dentry->d_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->remove_cache_srcu ->&xa->xa_lock#5 ->stock_lock ->&n->list_lock FD: 28 BD: 60 +.+.: list_lrus_mutex ->&rq->__lock FD: 1 BD: 68 ....: unnamed_dev_ida.xa_lock FD: 1 BD: 66 +.+.: &sbinfo->stat_lock FD: 77 BD: 501 +.+.: &s->s_inode_list_lock ->&sb->s_type->i_lock_key#34 ->&sb->s_type->i_lock_key#35 ->&sb->s_type->i_lock_key#37 ->&sb->s_type->i_lock_key#36 ->&sb->s_type->i_lock_key#38 ->&sb->s_type->i_lock_key#39 ->&sb->s_type->i_lock_key#41 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key#42 ->&sb->s_type->i_lock_key#43 ->dq_data_lock ->&sb->s_type->i_lock_key#45 ->&sb->s_type->i_lock_key#47 ->&sb->s_type->i_lock_key#50 ->&sb->s_type->i_lock_key#49 ->&sb->s_type->i_lock_key#51 FD: 60 BD: 509 +.+.: &sb->s_type->i_lock_key ->&dentry->d_lock ->&xa->xa_lock#9 ->&shmem_falloc_waitq ->&p->pi_lock FD: 42 BD: 748 +.+.: &dentry->d_lock ->&wq ->&dentry->d_lock/1 ->&obj_hash[i].lock ->pool_lock#2 ->&p->pi_lock ->&wq#2 ->sysctl_lock ->&dentry->d_lock/2 ->&wq#3 ->&wq#4 FD: 2 BD: 217 ....: mnt_id_ida.xa_lock ->pool_lock#2 FD: 47 BD: 291 +.+.: mount_lock ->mount_lock.seqcount ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 45 BD: 289 +.+.: mount_lock.seqcount ->&new_ns->poll ->&dentry->d_lock ->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 ->mnt_group_ida.xa_lock ->&p->pi_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#2/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount 
->&sb->s_type->i_lock_key#2 ->&dentry->d_lock FD: 2 BD: 142 -.-.: jiffies_lock ->jiffies_seq.seqcount FD: 29 BD: 1 -.-.: log_wait.lock ->&p->pi_lock FD: 43 BD: 364 +.+.: &sb->s_type->i_lock_key#2 ->&dentry->d_lock FD: 1 BD: 4 ..-.: ucounts_lock FD: 44 BD: 261 +.+.: init_fs.lock ->init_fs.seq.seqcount ->&dentry->d_lock FD: 1 BD: 251 +.+.: init_fs.seq.seqcount FD: 1 BD: 214 +.+.: mnt_ns_tree_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#3/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#3 ->&dentry->d_lock FD: 59 BD: 509 +.+.: &sb->s_type->i_lock_key#3 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 1 BD: 171 ++++: proc_subdir_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#4/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#4 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#4 ->&dentry->d_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#5/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#5 ->&dentry->d_lock FD: 36 BD: 150 ....: cgroup_file_kn_lock ->kernfs_notify_lock FD: 38 BD: 149 ..-.: css_set_lock ->cgroup_file_kn_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 2 BD: 399 +...: cgroup_idr_lock ->pool_lock#2 FD: 58 BD: 142 +.+.: cpuset_mutex ->callback_lock ->jump_label_mutex ->&p->pi_lock ->&rq->__lock ->&p->alloc_lock ->cpuset_attach_wq.lock ->rcu_node_0 FD: 1 BD: 143 ....: callback_lock FD: 83 BD: 18 +.+.: blkcg_pol_mutex ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 FD: 8 BD: 399 +.+.: memcg_idr_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 18 +.+.: devcgroup_mutex FD: 30 BD: 142 +.+.: freezer_mutex ->freezer_lock ->rcu_node_0 ->&rq->__lock FD: 45 BD: 69 +.+.: rcu_state.exp_mutex ->rcu_node_0 ->rcu_state.exp_wake_mutex ->(worker)->lock ->&rnp->exp_wq[2] ->&rq->__lock ->&rnp->exp_wq[3] ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->percpu_counters_lock ->rcu_state.exp_mutex.wait_lock FD: 35 BD: 143 +.+.: rcu_state.exp_wake_mutex ->rcu_node_0 ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 145 +.+.: &rnp->exp_lock FD: 29 BD: 147 ....: &rnp->exp_wq[0] ->&p->pi_lock FD: 29 BD: 145 ....: &rnp->exp_wq[1] ->&p->pi_lock FD: 1 BD: 218 ....: init_sighand.siglock FD: 1 BD: 3 +.+.: init_files.file_lock FD: 30 BD: 419 ....: pidmap_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&p->pi_lock ->&____s->seqcount#2 FD: 157 BD: 141 ++++: cgroup_threadgroup_rwsem ->css_set_lock ->&p->pi_lock ->tk_core.seq.seqcount ->tasklist_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->key ->pcpu_lock ->percpu_counters_lock ->&sighand->siglock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&x->wait#2 ->fs_reclaim ->mmu_notifier_invalidate_range_start ->inode_hash_lock ->&sb->s_type->i_lock_key#31 ->&root->kernfs_iattr_rwsem ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&c->lock ->cpuset_mutex ->freezer_mutex ->&p->alloc_lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock 
->cgroup_threadgroup_rwsem.waiters.lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 28 BD: 1475 -.-.: &p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&rq->__lock/1 FD: 67 BD: 217 .+.+: tasklist_lock ->init_sighand.siglock ->&sighand->siglock ->&pid->wait_pidfd ->&obj_hash[i].lock ->quarantine_lock ->&p->alloc_lock ->stock_lock ->pool_lock#2 ->&p->pi_lock FD: 1 BD: 1553 -.-.: &per_cpu_ptr(group->pcpu, cpu)->seq FD: 1 BD: 1 ....: (kthreadd_done).wait.lock FD: 42 BD: 220 -.-.: &sighand->siglock ->&sig->wait_chldexit ->input_pool.lock ->&(&sig->stats_lock)->lock ->&p->pi_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->hrtimer_bases.lock ->&obj_hash[i].lock ->&sighand->signalfd_wqh ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&tty->ctrl.lock ->&rq->__lock ->stock_lock ->&____s->seqcount#2 ->&n->list_lock FD: 51 BD: 259 +.+.: &p->alloc_lock ->&____s->seqcount#2 ->init_fs.lock ->&fs->lock ->&x->wait ->cpu_asid_lock ->&x->wait#24 ->&newf->file_lock FD: 1 BD: 1619 .-.-: &____s->seqcount#2 FD: 77 BD: 750 +.+.: fs_reclaim ->mmu_notifier_invalidate_range_start ->&mapping->i_mmap_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->icc_bw_lock FD: 36 BD: 915 +.+.: mmu_notifier_invalidate_range_start ->dma_fence_map ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 176 +.+.: kthread_create_lock FD: 29 BD: 288 ....: &x->wait ->&p->pi_lock FD: 38 BD: 1 +.+.: sched_map-wait-type-override ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&pool->lock ->&acct->lock ->&__ctx->lock FD: 29 BD: 1168 ....: (worker)->lock ->&p->pi_lock FD: 36 BD: 150 +.+.: wq_pool_attach_mutex ->&p->pi_lock ->&x->wait#6 ->&pool->lock ->&rq->__lock FD: 1 BD: 1165 ..-.: wq_mayday_lock FD: 4 BD: 1 -.-.: (null) ->tk_core.seq.seqcount FD: 1 BD: 150 ....: &xa->xa_lock FD: 35 BD: 1 +.-.: (&pool->mayday_timer) ->&pool->lock ->&obj_hash[i].lock ->&base->lock FD: 47 BD: 1 +.+.: (wq_completion)rcu_gp ->(work_completion)(&rnp->exp_poll_wq) ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(work_completion)(&sdp->work) ->&rq->__lock FD: 36 BD: 2 +.+.: (work_completion)(&rnp->exp_poll_wq) ->&rnp->exp_poll_lock FD: 1505 BD: 1 +.+.: (wq_completion)events ->(work_completion)(&w->work) ->(work_completion)(&sscs.work) ->rdist_memreserve_cpuhp_cleanup_work ->(shepherd).work ->(work_completion)(&(&group->avgs_work)->work) ->(work_completion)(&rfkill_global_led_trigger_work) ->timer_update_work ->pcpu_balance_work ->(work_completion)(&p->wq) ->(debug_obj_work).work ->(work_completion)(&helper->damage_work) ->(work_completion)(&rfkill->sync_work) ->(delayed_fput_work).work ->(work_completion)(&gadget->work) ->kernfs_notify_work ->(work_completion)(&(&krcp->monitor_work)->work) ->async_lookup_work ->autoload_work ->(work_completion)(&barr->work) ->drain_vmap_work ->netstamp_work ->reg_work ->(work_completion)(&fw_work->work) ->(work_completion)(&s->destroy_work) ->(work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&ht->run_work) ->(work_completion)(&aux->work) ->(work_completion)(&w->work)#2 ->(deferred_probe_timeout_work).work ->(work_completion)(&sbi->s_sb_upd_work) ->(work_completion)(&cgrp->bpf.release_work) ->(work_completion)(&w->w) ->deferred_process_work ->(work_completion)(&data->fib_event_work) 
->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->wireless_nlevent_work ->(work_completion)(&(&conn->info_timer)->work) ->(work_completion)(&sci->sc_iput_work) ->(regulator_init_complete_work).work ->(work_completion)(&(&ctx->free_rwork)->work) ->(work_completion)(&tty->hangup_work) ->(work_completion)(&(&ctx->fallback_work)->work) ->free_ipc_work ->&rq->__lock ->(work_completion)(&msk->work) ->binder_deferred_work ->(work_completion)(&(&devlink->rwork)->work) ->(fqdir_free_work).work ->cpool_cleanup_work ->((tcp_md5_needed).work).work ->key_gc_work ->(work_completion)(&ns->work) ->(work_completion)(&m->wq) ->p9_poll_work ->(work_completion)(&m->rq) ->(work_completion)(&ruleset->work_free) ->(work_completion)(&rdev->mgmt_registrations_update_wk) ->(work_completion)(&rdev->conn_work) ->(work_completion)(&(&pool->release_dw)->work) ->(work_completion)(&nlk->work) ->(work_completion)(&sbi->s_error_work) ->(work_completion)(&pool->work) ->(work_completion)(&hu->write_work) ->cfg80211_disconnect_work FD: 317 BD: 2 +.+.: (work_completion)(&w->work) ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 14 BD: 1 +.-.: (&wq_watchdog_timer) ->&obj_hash[i].lock ->&base->lock FD: 1183 BD: 1 +.+.: (wq_completion)events_unbound ->(work_completion)(&(&kfence_timer)->work) ->(next_reseed).work ->(stats_flush_dwork).work ->(work_completion)(&sub_info->work) ->(linkwatch_work).work ->deferred_probe_work ->(work_completion)(&barr->work) ->connector_reaper_work ->(reaper_work).work ->(work_completion)(&rdev->wiphy_work) ->(work_completion)(&port->bc_work) ->(work_completion)(&map->work) ->(work_completion)(&buf->work) ->&rq->__lock ->(work_completion)(&fs_info->async_reclaim_work) ->(work_completion)(&fs_info->async_data_reclaim_work) ->(quota_release_work).work FD: 318 BD: 2 +.+.: (work_completion)(&(&kfence_timer)->work) ->cpu_hotplug_lock ->allocation_wait.lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 29 BD: 3 -.-.: allocation_wait.lock ->&p->pi_lock FD: 3 BD: 1685 ..-.: batched_entropy_u8.lock ->crngs.lock FD: 1 BD: 1685 ..-.: kfence_freelist_lock FD: 1 BD: 521 ..-.: &meta->lock FD: 43 BD: 1 +.+.: rcu_tasks.tasks_gp_mutex ->&obj_hash[i].lock ->&base->lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->kernel/rcu/tasks.h:155 ->&rq->__lock ->(&timer.timer) ->&x->wait#2 ->(console_sem).lock FD: 1 BD: 1 ....: rcu_tasks.cbs_gbl_lock FD: 14 BD: 3 ..-.: rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock FD: 29 BD: 194 ..-.: &x->wait#2 ->&p->pi_lock FD: 29 BD: 147 ....: &rnp->exp_wq[2] ->&p->pi_lock FD: 1 BD: 140 ....: &ACCESS_PRIVATE(rtpcp, lock) FD: 1 BD: 2 ....: kernel/rcu/tasks.h:155 FD: 317 BD: 1 +.+.: rcu_tasks_trace.tasks_gp_mutex ->cpu_hotplug_lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&x->wait#2 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->(console_sem).lock FD: 1 BD: 1 ....: rcu_tasks_trace.cbs_gbl_lock FD: 29 BD: 321 +.-.: (&timer.timer) ->&p->pi_lock FD: 35 BD: 1 ..-.: &(&kfence_timer)->timer FD: 29 BD: 146 ....: &rnp->exp_wq[3] ->&p->pi_lock FD: 14 BD: 140 ..-.: rcu_tasks_trace__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 141 +.+.: (memory_chain).rwsem FD: 80 BD: 141 +.+.: smpboot_threads_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 29 BD: 697 ..-.: &rcu_state.gp_wq ->&p->pi_lock FD: 80 BD: 141 
+.+.: &rnp->kthread_mutex ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock FD: 78 BD: 140 +.+.: tmigr_mutex ->fs_reclaim ->pool_lock#2 ->&group->lock ->&obj_hash[i].lock FD: 1 BD: 1629 ..-.: &group->lock FD: 2 BD: 1628 ..-.: &tmc->lock ->&group->lock FD: 28 BD: 148 -.-.: &stop_pi_lock ->&rq->__lock FD: 1 BD: 148 -.-.: &stopper->lock FD: 1 BD: 2 +.+.: (module_notify_list).rwsem FD: 1 BD: 1 +.+.: ddebug_lock FD: 2 BD: 1 +.+.: cci_probing ->devtree_lock FD: 1 BD: 1 +.+.: ptlock_ptr(ptdesc) FD: 317 BD: 1 +.+.: watchdog_mutex ->cpu_hotplug_lock FD: 16 BD: 2 +.+.: (work_completion)(&sscs.work) ->&x->wait#3 ->&obj_hash[i].lock ->hrtimer_bases.lock ->&x->wait#4 FD: 1 BD: 142 -.-.: &x->wait#3 FD: 1 BD: 140 ....: &x->wait#4 FD: 2 BD: 262 +.+.: &newf->file_lock ->&newf->resize_wait FD: 1 BD: 1 ....: &p->vtime.seqcount FD: 183 BD: 139 ++++: mem_hotplug_lock ->mem_hotplug_lock.rss.gp_wait.lock ->memory_tier_lock FD: 3 BD: 140 ..-.: mem_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 139 ....: mem_hotplug_lock.waiters.lock FD: 317 BD: 4 +.+.: cpu_add_remove_lock ->cpu_hotplug_lock ->cpu_hotplug_lock.waiters.lock ->cpu_hotplug_lock.rss.gp_wait.lock FD: 3 BD: 139 ..-.: cpu_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 29 BD: 139 ....: cpu_hotplug_lock.waiters.lock ->&p->pi_lock FD: 1 BD: 1 ....: rcu_callback FD: 1 BD: 140 +.+.: pcp_batch_high_lock FD: 1 BD: 139 +.+.: relay_channels_mutex FD: 29 BD: 145 ....: (cpu_running).wait.lock ->&p->pi_lock FD: 29 BD: 145 ....: &x->wait#5 ->&p->pi_lock FD: 1 BD: 1559 -.-.: &cfs_rq->removed.lock FD: 1 BD: 151 ....: &x->wait#6 FD: 15 BD: 1553 -.-.: &rt_b->rt_runtime_lock ->&rt_rq->rt_runtime_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 1 BD: 1554 -.-.: &rt_rq->rt_runtime_lock FD: 31 BD: 139 +.+.: stop_cpus_mutex ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->&x->wait#7 FD: 1 BD: 141 ....: &x->wait#7 FD: 317 BD: 2 +.+.: rdist_memreserve_cpuhp_cleanup_work ->cpu_hotplug_lock FD: 83 BD: 1 +.+.: sched_domains_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->&c->lock ->pcpu_lock FD: 1 BD: 1552 ....: &cp->lock FD: 121 BD: 1 +.+.: &type->s_umount_key#6/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 43 BD: 365 +.+.: &sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 29 BD: 1 ....: (setup_done).wait.lock ->&p->pi_lock FD: 127 BD: 213 ++++: namespace_sem ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->mnt_ns_tree_lock ->rename_lock ->&obj_hash[i].lock ->&n->list_lock ->tasklist_lock ->&____s->seqcount#2 ->stock_lock ->&rq->__lock ->rcu_node_0 ->mnt_group_ida.xa_lock ->release_agent_path_lock ->&rcu_state.expedited_wq FD: 1 BD: 251 +.+.: &____s->seqcount#3 FD: 91 BD: 1 +.+.: &type->s_umount_key#7 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->&lru->node[i].lock ->&sbinfo->stat_lock ->&obj_hash[i].lock FD: 1 BD: 1038 +.+.: &lru->node[i].lock FD: 137 BD: 7 ++++: &sb->s_type->i_mutex_key ->namespace_sem ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount 
->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&wb->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 44 BD: 207 +.+.: rename_lock ->rename_lock.seqcount FD: 43 BD: 344 +.+.: rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/2 ->&dentry->d_lock/1 FD: 1 BD: 290 ....: &new_ns->poll FD: 44 BD: 251 +.+.: &fs->lock ->&____s->seqcount#3 ->&dentry->d_lock FD: 1 BD: 134 +.+.: req_lock FD: 1 BD: 1 +.+.: (of_reconfig_chain).rwsem FD: 159 BD: 1 +.+.: of_mutex ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 203 ....: &x->wait#8 FD: 1 BD: 237 +.+.: &k->list_lock FD: 28 BD: 208 ++++: bus_type_sem ->&rq->__lock FD: 37 BD: 524 -...: &dev->power.lock ->&dev->power.wait_queue ->hrtimer_bases.lock ->&dev->power.lock/1 FD: 29 BD: 206 +.+.: dpm_list_mtx ->&rq->__lock ->rcu_node_0 ->&obj_hash[i].lock ->pool_lock#2 FD: 84 BD: 211 +.+.: uevent_sock_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.gp_wq ->fs_reclaim ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->uevent_sock_mutex.wait_lock ->&____s->seqcount#2 ->mmu_notifier_invalidate_range_start ->&rcu_state.expedited_wq FD: 1 BD: 198 ....: running_helpers_waitq.lock FD: 1 BD: 228 +.+.: sysfs_symlink_target_lock FD: 2 BD: 278 +.+.: &k->k_lock ->klist_remove_lock FD: 1 BD: 1 ....: &dev->mutex FD: 1 BD: 1 +.+.: subsys mutex FD: 2 BD: 1 +.+.: memory_blocks.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#2 FD: 80 BD: 33 +.+.: register_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 1 +.+.: (cpufreq_policy_notifier_list).rwsem FD: 1 BD: 15 +.+.: (pm_chain_head).rwsem FD: 1 BD: 1 +.+.: cpufreq_governor_mutex FD: 29 BD: 887 -.-.: &rcu_state.expedited_wq ->&p->pi_lock FD: 1 BD: 2 ++++: binfmt_lock FD: 1 BD: 161 +.+.: pin_fs_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#8/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&____s->seqcount ->&c->lock ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#7 ->&dentry->d_lock FD: 43 BD: 3 +.+.: &sb->s_type->i_lock_key#7 ->&dentry->d_lock FD: 111 BD: 1 +.+.: &sb->s_type->i_mutex_key#2 ->&sb->s_type->i_lock_key#7 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount FD: 29 BD: 752 ....: &wq ->&p->pi_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#9/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&dentry->d_lock FD: 43 BD: 159 +.+.: &sb->s_type->i_lock_key#8 ->&dentry->d_lock FD: 159 BD: 157 ++++: &sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#8 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->&p->pi_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->(console_sem).lock ->&n->list_lock ->pin_fs_lock ->mount_lock ->&fsnotify_mark_srcu ->&xa->xa_lock#9 ->&obj_hash[i].lock ->&____s->seqcount#2 ->&xa->xa_lock#5 ->stock_lock ->remove_cache_srcu 
->&rcu_state.gp_wq ->namespace_sem ->key ->pcpu_lock ->percpu_counters_lock FD: 28 BD: 42 +.+.: chrdevs_lock ->&rq->__lock FD: 2 BD: 1 .+.+: gpio_devices_srcu ->gpio_lookup_lock FD: 1111 BD: 4 ++++: cb_lock ->genl_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->&____s->seqcount ->rtnl_mutex ->&obj_hash[i].lock ->&n->list_lock ->&rdev->wiphy.mtx ->nlk_cb_mutex-GENERIC ->&xa->xa_lock#16 ->rcu_node_0 ->&rcu_state.expedited_wq ->genl_mutex.wait_lock ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&____s->seqcount#2 ->&zone->lock ->&dir->lock#2 ->&lock->wait_lock ->(console_sem).lock ->quarantine_lock ->remove_cache_srcu ->rcu_state.barrier_mutex ->lweventlist_lock ->pcpu_lock ->krc.lock ->netdev_unregistering_wq.lock FD: 1098 BD: 6 +.+.: genl_mutex ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&n->list_lock ->&zone->lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->genl_mutex.wait_lock ->hwsim_radio_lock ->&x->wait#8 ->batched_entropy_u32.lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#52 ->device_links_lock ->&k->k_lock ->deferred_probe_mutex ->wq_pool_mutex ->crngs.lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&____s->seqcount#2 ->(console_sem).lock ->rcu_node_0 ->&pn->l2tp_tunnel_idr_lock ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&sdata->sec_mtx ->&sem->wait_lock ->uevent_sock_mutex.wait_lock ->&base->lock ->(&timer.timer) FD: 1 BD: 3 +.+.: subsys mutex#3 FD: 4 BD: 5 ....: async_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +.+.: regulator_list_mutex FD: 424 BD: 1 +.+.: (wq_completion)async ->(work_completion)(&entry->work) FD: 423 BD: 2 +.+.: (work_completion)(&entry->work) ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->async_lock ->async_done.lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->sb_writers#2 ->&sb->s_type->i_lock_key#2 FD: 1 BD: 19 .+.+: device_links_srcu FD: 3 BD: 18 +.+.: fwnode_link_lock ->&k->k_lock FD: 31 BD: 106 +.+.: device_links_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 32 ....: &dev->devres_lock FD: 28 BD: 18 +.+.: pinctrl_list_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 18 +.+.: pinctrl_maps_mutex FD: 1 BD: 4 +.+.: regulator_nesting_mutex FD: 2 BD: 1 +.+.: regulator_ww_class_mutex ->regulator_nesting_mutex FD: 162 BD: 188 +.+.: gdp_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->kobj_ns_type_lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&n->list_lock ->&rq->__lock FD: 1 BD: 1 +.+.: (reboot_notifier_list).rwsem FD: 3 BD: 3 +.+.: subsys mutex#4 ->&k->k_lock FD: 28 BD: 107 +.+.: deferred_probe_mutex ->&rq->__lock FD: 1 BD: 18 ....: probe_waitqueue.lock FD: 29 BD: 3 ....: async_done.lock ->&p->pi_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#10/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock 
->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#9 ->&dentry->d_lock FD: 43 BD: 76 +.+.: &sb->s_type->i_lock_key#9 ->&dentry->d_lock ->&p->pi_lock FD: 85 BD: 75 +.+.: pack_mutex ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->init_mm.page_table_lock ->vmap_purge_lock ->patch_lock ->&rq->__lock FD: 33 BD: 77 +.+.: vmap_purge_lock ->&vn->lazy.lock ->init_mm.page_table_lock ->&vn->pool_lock ->free_vmap_area_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 119 +.+.: &vn->pool_lock FD: 29 BD: 73 +.+.: &fp->aux->used_maps_mutex ->&map->owner.lock ->&rq->__lock FD: 1 BD: 1 +.+.: proto_list_mutex FD: 1 BD: 1 +.+.: targets_mutex FD: 30 BD: 307 ...-: nl_table_lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_wait.lock ->&c->lock ->&n->list_lock FD: 29 BD: 308 ..-.: nl_table_wait.lock ->&p->pi_lock FD: 1 BD: 1 +.+.: net_family_lock FD: 2 BD: 5 ....: net_generic_ids.xa_lock ->pool_lock#2 FD: 4 BD: 102 ..-.: &dir->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 40 BD: 5 +.+.: k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq FD: 1 BD: 6 +...: k-slock-AF_NETLINK FD: 2 BD: 264 ..-.: rhashtable_bucket ->rhashtable_bucket/1 FD: 1 BD: 69 ....: &cma->lock FD: 50 BD: 69 +.+.: cma_mutex ->&zone->lock ->lock#2 ->&obj_hash[i].lock FD: 4 BD: 1 +.+.: pcpu_drain_mutex ->&pcp->lock FD: 44 BD: 180 +.+.: lock#2 ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#10 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: &pool->lock#2 FD: 1 BD: 143 ....: freezer_lock FD: 1 BD: 1 ....: audit_backlog_wait.lock FD: 29 BD: 1 ....: kauditd_wait.lock ->&p->pi_lock FD: 1 BD: 1 ....: &list->lock FD: 1 BD: 1 ....: printk_ratelimit_state.lock FD: 3 BD: 2 +.+.: lock#3 ->&zone->lock FD: 82 BD: 1 +.+.: khugepaged_mutex ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->lock#3 ->pcp_batch_high_lock FD: 182 BD: 8 ++++: &(&priv->bus_notifier)->rwsem ->&device->physical_node_lock ->iommu_probe_device_lock ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&c->lock ->&____s->seqcount ->i2c_dev_list_lock ->&x->wait#8 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#60 ->&sem->wait_lock FD: 3 BD: 35 +.+.: subsys mutex#5 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#6 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: regmap_debugfs_early_lock FD: 1 BD: 1 +.+.: (acpi_reconfig_chain).rwsem FD: 1 BD: 1 +.+.: __i2c_board_lock FD: 81 BD: 1 +.+.: core_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->&obj_hash[i].lock ->fs_reclaim FD: 1 BD: 843 -.-.: quarantine_lock FD: 38 BD: 521 .+.+: remove_cache_srcu ->quarantine_lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->&rq->__lock ->pool_lock#2 ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&rcu_state.gp_wq ->stock_lock ->&base->lock FD: 2 BD: 1 +.+.: thermal_governor_lock ->thermal_list_lock FD: 1 BD: 142 +.+.: thermal_list_lock FD: 33 BD: 1 +.+.: cpuidle_lock ->(console_sem).lock FD: 81 BD: 1 +.+.: k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->pool_lock#2 ->qrtr_node_lock ->&obj_hash[i].lock 
FD: 1 BD: 2 +...: k-slock-AF_QIPCRTR FD: 1 BD: 2 +.+.: qrtr_ports.xa_lock FD: 1 BD: 2 +.+.: qrtr_node_lock FD: 80 BD: 159 ++++: (crypto_chain).rwsem ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock FD: 1198 BD: 2 +.+.: tty_mutex ->(console_sem).lock ->console_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->tty_ldiscs_lock ->&obj_hash[i].lock ->&k->list_lock ->&k->k_lock ->&tty->legacy_mutex ->stock_lock ->&rq->__lock ->devpts_mutex ->&xa->xa_lock#23 ->rcu_node_0 FD: 317 BD: 1 +.+.: iova_cache_mutex ->slab_mutex ->cpu_hotplug_lock FD: 3 BD: 1 +.+.: subsys mutex#7 ->&k->k_lock FD: 1 BD: 1 ..-.: uidhash_lock FD: 32 BD: 2 +.+.: (work_completion)(&s->destroy_work) ->&obj_hash[i].lock ->pool_lock#2 ->&rsp->gp_wait ->pcpu_lock ->&base->lock ->quarantine_lock ->&rq->__lock FD: 81 BD: 1 +.+.: (work_completion)(&eval_map_work) ->trace_event_sem FD: 1 BD: 1 ....: oom_reaper_wait.lock FD: 1 BD: 2 +.+.: subsys mutex#8 FD: 1 BD: 1 ....: &pgdat->kcompactd_wait FD: 175 BD: 140 +.+.: memory_tier_lock ->fs_reclaim ->pool_lock#2 ->(memory_chain).rwsem ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#72 ->(console_sem).lock FD: 1 BD: 1 +.+.: ksm_thread_mutex FD: 1 BD: 1 ....: ksm_thread_wait.lock FD: 1 BD: 1 +.+.: damon_ops_lock FD: 81 BD: 158 ++++: crypto_alg_sem ->(crypto_chain).rwsem ->&rq->__lock FD: 1 BD: 153 +.+.: khugepaged_mm_lock FD: 29 BD: 153 ....: khugepaged_wait.lock ->&p->pi_lock FD: 18 BD: 929 ..-.: krc.lock ->&obj_hash[i].lock ->hrtimer_bases.lock ->&base->lock FD: 1 BD: 150 +.+.: wq_pool_mutex.wait_lock FD: 208 BD: 9 +.+.: bio_slab_lock ->fs_reclaim ->pool_lock#2 ->slab_mutex ->bio_slabs.xa_lock FD: 8 BD: 10 +.+.: bio_slabs.xa_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 79 BD: 1 +.+.: major_names_lock ->fs_reclaim ->pool_lock#2 ->major_names_spinlock ->&c->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: major_names_spinlock FD: 84 BD: 1 +.+.: (wq_completion)events_highpri ->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(flush) ->(work_completion)(&barr->work) FD: 79 BD: 2 +.+.: (work_completion)(&(&krcp->page_cache_work)->work) ->fs_reclaim ->&____s->seqcount ->krc.lock FD: 16 BD: 1 +.-.: (&rtpcp->lazy_timer) ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->rcu_tasks__percpu.cbs_pcpu_lock FD: 80 BD: 1 +.+.: &pgdat->kswapd_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock FD: 35 BD: 1 ..-.: drivers/char/random.c:255 FD: 15 BD: 2 +.+.: (next_reseed).work ->&obj_hash[i].lock ->&base->lock ->input_pool.lock ->base_crng.lock FD: 35 BD: 1 ..-.: mm/vmstat.c:2046 FD: 317 BD: 2 +.+.: (shepherd).work ->cpu_hotplug_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 36 BD: 181 +.+.: (wq_completion)mm_percpu_wq ->(work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->(work_completion)(work) ->(work_completion)(&barr->work) FD: 28 BD: 182 +.+.: (work_completion)(&(({ do { const void *__vpp_verify = 
(typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 3 BD: 35 +.+.: subsys mutex#9 ->&k->k_lock FD: 1 BD: 144 -...: shrink_qlist.lock FD: 35 BD: 5 ....: remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 1 BD: 152 ....: &ACCESS_PRIVATE(sdp, lock) FD: 40 BD: 2 +.+.: (work_completion)(&(&ssp->srcu_sup->work)->work) ->&ssp->srcu_sup->srcu_gp_mutex ->remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&ssp->srcu_sup->srcu_cb_mutex ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->dquot_srcu_srcu_usage.lock FD: 39 BD: 3 +.+.: &ssp->srcu_sup->srcu_gp_mutex ->remove_cache_srcu_srcu_usage.lock ->&rq->__lock ->&ssp->srcu_sup->srcu_cb_mutex ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->dquot_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 152 ....: &x->wait#9 ->&p->pi_lock FD: 35 BD: 1 ..-.: &(&ssp->srcu_sup->work)->timer FD: 38 BD: 4 +.+.: &ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->dquot_srcu_srcu_usage.lock FD: 32 BD: 2 +.+.: (work_completion)(&sdp->work) ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&x->wait#9 ->&rq->__lock ->rcu_node_0 FD: 38 BD: 144 +.+.: flush_lock ->&obj_hash[i].lock ->&x->wait#10 ->&rq->__lock FD: 9 BD: 146 +.+.: (work_completion)(&sfw->work) ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 32 BD: 145 +.+.: (wq_completion)slub_flushwq ->(work_completion)(&sfw->work) ->(work_completion)(&barr->work) FD: 29 BD: 1159 ....: &x->wait#10 ->&p->pi_lock FD: 30 BD: 237 +.+.: (work_completion)(&barr->work) ->&x->wait#10 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: prepare_lock FD: 160 BD: 1 +.+.: clk_debug_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 1 BD: 1 +.+.: clocks_mutex FD: 1244 BD: 1 +.+.: acpi_scan_lock ->semaphore->lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#8 ->acpi_device_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#10 ->uevent_sock_mutex ->running_helpers_waitq.lock ->*(&acpi_gbl_reference_count_lock) ->&n->list_lock ->quarantine_lock ->acpi_ioremap_lock ->cpu_add_remove_lock ->&device->physical_node_lock ->irq_domain_mutex ->&domain->mutex ->resource_lock ->&(&priv->bus_notifier)->rwsem ->fwnode_link_lock ->device_links_srcu ->devtree_lock ->acpi_pm_notifier_install_lock ->free_vmap_area_lock ->&vn->busy.lock ->subsys mutex#3 ->init_mm.page_table_lock ->(console_sem).lock ->io_range_mutex ->pci_bus_sem ->gdp_mutex ->subsys mutex#16 ->acpi_hp_context_lock ->bridge_mutex ->pci_lock ->pci_acpi_companion_lookup_sem ->pci_slot_mutex ->resource_alignment_lock ->iort_msi_chip_lock ->subsys mutex#17 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pci_rescan_remove_lock ->acpi_link_lock ->acpi_dep_list_lock ->power_resource_list_lock FD: 80 BD: 2 +.+.: acpi_device_lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#2 ->semaphore->lock ->&obj_hash[i].lock FD: 1 BD: 3 ....: &xa->xa_lock#2 FD: 1 BD: 2 +.+.: subsys mutex#10 FD: 1 BD: 139 +.+.: subsys mutex#11 FD: 82 BD: 170 +.+.: dev_pm_qos_mtx 
->fs_reclaim ->pool_lock#2 ->&dev->power.lock ->pm_qos_lock ->&c->lock ->&____s->seqcount ->&rq->__lock FD: 1 BD: 175 ....: pm_qos_lock FD: 160 BD: 169 +.+.: dev_pm_qos_sysfs_mtx ->dev_pm_qos_mtx ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&c->lock ->&____s->seqcount ->&sem->wait_lock ->&rq->__lock ->&p->pi_lock FD: 160 BD: 45 +.+.: &device->physical_node_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock FD: 180 BD: 2 +.+.: acpi_pm_notifier_install_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->*(&acpi_gbl_reference_count_lock) ->acpi_pm_notifier_lock ->&c->lock ->&____s->seqcount FD: 177 BD: 3 +.+.: acpi_pm_notifier_lock ->fs_reclaim ->pool_lock#2 ->wakeup_ida.xa_lock ->&x->wait#8 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#12 ->events_lock FD: 1 BD: 5 ....: wakeup_ida.xa_lock FD: 3 BD: 5 +.+.: subsys mutex#12 ->&k->k_lock FD: 1 BD: 5 ....: events_lock FD: 1 BD: 2 +.+.: acpi_wakeup_lock FD: 533 BD: 2 +.+.: port_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#13 ->&xa->xa_lock#3 ->&c->lock ->&port->mutex FD: 1 BD: 3 +.+.: subsys mutex#13 FD: 1 BD: 3 ....: &xa->xa_lock#3 FD: 1 BD: 525 ....: &dev->power.wait_queue FD: 107 BD: 1 +.+.: (wq_completion)pm ->(work_completion)(&dev->power.work) ->(work_completion)(&hcd->wakeup_work) FD: 99 BD: 2 +.+.: (work_completion)(&dev->power.work) ->&dev->power.lock ->&port_lock_key ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&obj_hash[i].lock ->&base->lock ->hcd_urb_unlink_lock ->hcd_root_hub_lock ->usb_kill_urb_queue.lock ->&rq->__lock ->&dum_hcd->dum->lock ->device_state_lock ->hcd_urb_list_lock ->&vhci_hcd->vhci->lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->fs_reclaim ->&x->wait#19 ->(&timer.timer) ->&c->lock ->&____s->seqcount ->&cfs_rq->removed.lock FD: 523 BD: 29 +.+.: &port->mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->&port_lock_key ->console_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->ctrl_ida.xa_lock ->&x->wait#8 ->&obj_hash[i].lock ->&dev->power.lock ->&k->list_lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->&device->physical_node_lock ->semaphore->lock ->&c->lock ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#14 ->*(&acpi_gbl_reference_count_lock) ->&n->list_lock ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->gdp_mutex ->bus_type_sem ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->subsys mutex#15 ->chrdevs_lock ->&cfs_rq->removed.lock ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock FD: 42 BD: 500 ..-.: &port_lock_key ->&dev->power.lock ->&port->lock ->&tty->write_wait FD: 31 BD: 31 +.+.: syslog_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 44 BD: 497 ..-.: console_owner ->&port_lock_key ->console_owner_lock FD: 35 BD: 1 ..-.: &(&group->avgs_work)->timer FD: 35 BD: 1 ..-.: &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer FD: 35 BD: 1 ..-.: mm/memcontrol.c:511 FD: 29 BD: 2 +.+.: (work_completion)(&(&group->avgs_work)->work) ->&group->avgs_lock ->&rq->__lock FD: 28 BD: 3 +.+.: &group->avgs_lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 28 BD: 2 +.+.: (stats_flush_dwork).work ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 1 ....: cgroup_rstat_lock FD: 1 BD: 1 ....: per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 30 ....: ctrl_ida.xa_lock FD: 1 BD: 30 +.+.: subsys mutex#14 FD: 1 BD: 279 +.+.: klist_remove_lock FD: 166 BD: 1 .+.+: sb_writers ->mount_lock ->&type->i_mutex_dir_key/1 ->&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#6 ->&wb->list_lock ->&type->i_mutex_dir_key#2 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&s->s_inode_list_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu FD: 152 BD: 2 +.+.: &type->i_mutex_dir_key/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&xattrs->lock ->&simple_offset_lock_class ->&sb->s_type->i_lock_key#6 ->&sb->s_type->i_mutex_key#4 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->&base->lock ->tomoyo_ss ->key ->pcpu_lock ->percpu_counters_lock ->&u->bindlock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#2 ->&____s->seqcount#2 ->&xa->xa_lock#9 ->cdev_lock ->&sb->s_type->i_mutex_key#4/4 ->krc.lock ->rcu_node_0 FD: 1 BD: 27 ++++: &xattrs->lock FD: 14 BD: 31 +.+.: &simple_offset_lock_class ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&____s->seqcount ->quarantine_lock ->&n->list_lock ->&____s->seqcount#2 FD: 30 BD: 3 +.+.: &sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->tomoyo_ss ->&xattrs->lock ->&rq->__lock ->&simple_offset_lock_class ->&dentry->d_lock ->&sb->s_type->i_mutex_key#4/4 FD: 29 BD: 134 ....: &x->wait#11 ->&p->pi_lock FD: 3 BD: 30 +.+.: subsys mutex#15 ->&k->k_lock FD: 1 BD: 2 +.+.: io_range_mutex FD: 1 BD: 2 ++++: pci_bus_sem FD: 3 BD: 2 +.+.: subsys mutex#16 ->&k->k_lock FD: 78 BD: 2 +.+.: acpi_hp_context_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 2 +.+.: bridge_mutex FD: 1 BD: 10 ....: pci_lock FD: 1 BD: 2 .+.+: pci_acpi_companion_lookup_sem FD: 1 BD: 2 +.+.: pci_slot_mutex FD: 1 BD: 2 +.+.: resource_alignment_lock FD: 1 BD: 525 ....: &dev->power.lock/1 FD: 1 BD: 2 +.+.: subsys mutex#17 FD: 41 BD: 2 +.+.: pci_rescan_remove_lock FD: 91 BD: 4 +.+.: acpi_link_lock ->fs_reclaim ->pool_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->*(&acpi_gbl_reference_count_lock) ->(console_sem).lock ->&c->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: acpi_dep_list_lock FD: 1 BD: 2 +.+.: power_resource_list_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#11/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#12/1 ->fs_reclaim ->pool_lock#2 
->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 438 BD: 150 ++++: &mm->mmap_lock ->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&vma->vm_lock->lock ->&____s->seqcount ->&mm->page_table_lock ->ptlock_ptr(ptdesc)#2 ->&c->lock ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&mapping->i_mmap_rwsem ->batched_entropy_u8.lock ->kfence_freelist_lock ->&p->alloc_lock ->rcu_node_0 ->&rq->__lock ->&lruvec->lru_lock ->tk_core.seq.seqcount ->&mm->mmap_lock/1 ->&sem->wait_lock ->&p->pi_lock ->&rcu_state.gp_wq ->&sb->s_type->i_lock_key#23 ->&n->list_lock ->remove_cache_srcu ->&meta->lock ->quarantine_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&kcov->lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->sb_pagefaults ->&mapping->i_private_lock ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&xa->xa_lock#9 ->&info->lock ->&____s->seqcount#2 ->binder_alloc_mmap_lock ->&rq->__lock/1 ->&tree->lock#2 ->&tree->lock ->&sb->s_type->i_lock_key#38 ->swap_slots_cache_mutex ->&cache->alloc_lock ->&xa->xa_lock#27 ->&((cluster_info + ci)->lock)#2 ->&xa->xa_lock#5 ->&acomp_ctx->mutex ->&xa->xa_lock#28 ->&folio_wait_table[i] ->sb_pagefaults#2 ->lock#2 ->&vn->busy.lock ->khugepaged_mm_lock ->khugepaged_wait.lock ->mapping.invalidate_lock ->(console_sem).lock FD: 98 BD: 152 +.+.: reservation_ww_class_acquire ->reservation_ww_class_mutex FD: 97 BD: 153 +.+.: reservation_ww_class_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key ->&info->lock ->lock#4 ->&rq->__lock ->free_vmap_area_lock ->&vn->busy.lock ->init_mm.page_table_lock ->stock_lock ->&obj_hash[i].lock ->&vn->lazy.lock ->lock#5 FD: 75 BD: 755 ++++: &mapping->i_mmap_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&anon_vma->rwsem ->&rq->__lock ->&sem->wait_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(ptdesc)#2 ->rcu_node_0 ->&rcu_state.gp_wq ->quarantine_lock ->lock#4 ->lock#5 ->&cfs_rq->removed.lock ->&xa->xa_lock#9 FD: 1 BD: 915 +.+.: dma_fence_map FD: 1 BD: 725 ....: key FD: 1 BD: 1 +.+.: attribute_container_mutex FD: 28 BD: 28 +.+.: triggers_list_lock ->&rq->__lock FD: 28 BD: 28 .+.+: leds_list_lock ->&rq->__lock FD: 220 BD: 4 ++++: (usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&x->wait#8 ->&obj_hash[i].lock ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#56 ->mon_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->usbfs_mutex FD: 1 BD: 1 +.+.: rc_map_lock FD: 1 BD: 1 +.+.: subsys mutex#18 FD: 2 BD: 9 +.+.: iommu_probe_device_lock ->iommu_device_lock FD: 1 BD: 10 +.+.: iommu_device_lock FD: 1 BD: 7 ....: (efi_runtime_lock).lock FD: 29 BD: 9 ....: &x->wait#12 ->&p->pi_lock FD: 32 BD: 1 +.+.: (wq_completion)efi_rts_wq ->(work_completion)(&efi_rts_work.work) FD: 31 BD: 2 +.+.: (work_completion)(&efi_rts_work.work) ->cpu_asid_lock ->efi_rt_lock ->&x->wait#12 ->&rq->__lock FD: 1 BD: 1551 ....: cpu_asid_lock FD: 1 BD: 3 +.+.: efi_rt_lock FD: 1 BD: 6 ....: (efivars_lock).lock FD: 1 BD: 1 +.+.: 
devfreq_list_lock FD: 1 BD: 2 +.+.: &entry->access FD: 80 BD: 2 +.+.: info_mutex ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 189 +.+.: kobj_ns_type_lock FD: 81 BD: 68 +.+.: page_pools_lock ->fs_reclaim ->page_pools.xa_lock ->rcu_node_0 ->&rq->__lock ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock FD: 2 BD: 69 +.+.: page_pools.xa_lock ->pool_lock#2 FD: 9 BD: 73 +.+.: &xa->xa_lock#4 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 30 BD: 72 +.+.: subsys mutex#19 ->&k->k_lock ->&rq->__lock FD: 8 BD: 179 ..-.: &dir->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount ->quarantine_lock FD: 40 BD: 80 +.+.: dev_hotplug_mutex ->&dev->power.lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 83 +...: netdev_rename_lock.seqcount FD: 1 BD: 67 ++++: qdisc_mod_lock FD: 19 BD: 1 ++++: bt_proto_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->chan_list_lock ->l2cap_sk_list.lock ->&sk->sk_peer_lock ->hci_sk_list.lock ->&n->list_lock ->sco_sk_list.lock FD: 78 BD: 29 +.+.: hci_cb_list_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->chan_list_lock ->&conn->ident_lock ->&base->lock ->&list->lock#8 ->&conn->chan_lock ->&c->lock ->&____s->seqcount ->&x->wait#2 ->&rq->__lock ->hci_cb_list_lock.wait_lock ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu FD: 1 BD: 1 +.+.: mgmt_chan_list_lock FD: 1 BD: 92 ....: &list->lock#2 FD: 78 BD: 70 +.+.: rate_ctrl_mutex ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: netlbl_domhsh_lock ->pool_lock#2 FD: 1 BD: 67 +.+.: netlbl_unlhsh_lock FD: 194 BD: 1 +.+.: misc_mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#20 ->misc_minors_ida.xa_lock ->&c->lock ->quarantine_lock ->&zone->lock ->&cfs_rq->removed.lock ->&base->lock ->&dir->lock ->rfkill_global_mutex ->&n->list_lock ->vga_user_lock ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->crngs.lock ->&fc->lock ->&fc->bg_lock ->rcu_node_0 FD: 3 BD: 2 +.+.: subsys mutex#20 ->&k->k_lock FD: 179 BD: 2 +.+.: (work_completion)(&rfkill_global_led_trigger_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rq->__lock FD: 178 BD: 27 +.+.: rfkill_global_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&rfkill->lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#39 ->triggers_list_lock ->leds_list_lock ->&rq->__lock ->rfkill_global_mutex.wait_lock ->&n->list_lock ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&data->mtx ->&____s->seqcount#2 ->remove_cache_srcu ->&sem->wait_lock ->quarantine_lock FD: 191 BD: 2 +.+.: input_mutex ->input_devices_poll_wait.lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->&dev->mutex#2 ->pool_lock#2 ->input_ida.xa_lock ->&x->wait#8 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->chrdevs_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 
->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->&sem->wait_lock ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->mmu_notifier_invalidate_range_start ->deferred_probe_mutex ->device_links_lock ->&n->list_lock ->cdev_lock ->&evdev->mutex ->&evdev->client_lock ->&x->wait#2 ->(&dev->timer) ->&base->lock FD: 1 BD: 3 ....: input_devices_poll_wait.lock FD: 131 BD: 4 ++++: (netlink_chain).rwsem ->hwsim_radio_lock ->pool_lock#2 ->&obj_hash[i].lock ->reg_indoor_lock ->&rq->__lock ->&q->instances_lock ->&log->instances_lock ->&nft_net->commit_mutex ->&c->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&n->list_lock FD: 10 BD: 1 ++++: proto_tab_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&c->lock FD: 3 BD: 1 ....: random_ready_notifier.lock ->crngs.lock FD: 1 BD: 2 ....: misc_minors_ida.xa_lock FD: 1 BD: 1 +.+.: wtd_deferred_reg_mutex FD: 119 BD: 1 +.+.: &type->s_umount_key#13/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#12 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#12 ->&dentry->d_lock FD: 318 BD: 2 +.+.: timer_update_work ->timer_keys_mutex FD: 317 BD: 3 +.+.: timer_keys_mutex ->cpu_hotplug_lock FD: 3 BD: 1626 ..-.: &base->lock/1 ->&tmc->lock FD: 131 BD: 1 +.+.: (work_completion)(&tracerfs_init_work) ->pin_fs_lock ->fs_reclaim ->pool_lock#2 ->sb_lock ->&type->s_umount_key#14/1 ->&type->s_umount_key#15 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#5 ->event_mutex ->(module_notify_list).rwsem ->trace_types_lock FD: 121 BD: 2 +.+.: &type->s_umount_key#14/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&c->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#13 ->&dentry->d_lock ->bit_wait_table + i ->&type->s_umount_key#15 FD: 43 BD: 6 +.+.: &sb->s_type->i_lock_key#13 ->&dentry->d_lock FD: 29 BD: 600 -.-.: bit_wait_table + i ->&p->pi_lock FD: 95 BD: 3 +.+.: &type->s_umount_key#15 ->sb_lock ->list_lrus_mutex ->&xa->xa_lock#5 ->&obj_hash[i].lock ->pool_lock#2 ->shrinker_mutex ->&rsp->gp_wait ->pcpu_lock ->&dentry->d_lock FD: 112 BD: 4 +.+.: &sb->s_type->i_mutex_key#5 ->&sb->s_type->i_lock_key#13 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->tracefs_inode_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 13 BD: 949 ....: &xa->xa_lock#5 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&____s->seqcount#2 ->&n->list_lock FD: 4 BD: 50 ..-.: &rsp->gp_wait ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 ....: tracefs_inode_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#16/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#14 ->&dentry->d_lock FD: 78 BD: 323 .+.+: &fsnotify_mark_srcu ->&conn->lock ->fs_reclaim ->pool_lock#2 ->&group->notification_lock ->&group->notification_waitq ->&rq->__lock ->&c->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock 
->remove_cache_srcu ->&n->list_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#14 ->&dentry->d_lock FD: 118 BD: 2 +.+.: event_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->trace_event_sem ->trace_types_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#17/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 1 BD: 5 +.+.: eventfs_mutex FD: 119 BD: 1 +.+.: &type->s_umount_key#18/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#16 ->&dentry->d_lock FD: 43 BD: 69 +.+.: &sb->s_type->i_lock_key#16 ->&dentry->d_lock FD: 79 BD: 1 +.+.: kclist_lock ->resource_lock ->fs_reclaim ->pool_lock#2 FD: 119 BD: 1 +.+.: &type->s_umount_key#19/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 444 BD: 82 .+.+: tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tomoyo_policy_lock ->(console_sem).lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dentry->d_lock ->tomoyo_log_lock ->tomoyo_log_wait.lock ->&rq->__lock ->file_systems_lock ->fs_reclaim ->quarantine_lock ->&mm->mmap_lock ->rcu_node_0 ->remove_cache_srcu ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->&rcu_state.expedited_wq ->mount_lock ->&root->kernfs_iattr_rwsem ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&fs->lock ->&rcu_state.gp_wq ->key ->pcpu_lock ->percpu_counters_lock FD: 78 BD: 1 +.+.: pnp_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#21 FD: 3 BD: 1 +.+.: subsys mutex#22 ->&k->k_lock FD: 3 BD: 35 +.+.: subsys mutex#23 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#24 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 ....: netevent_notif_chain.lock FD: 80 BD: 8 ++++: devices_rwsem ->clients_rwsem ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq FD: 79 BD: 9 +.+.: clients_rwsem ->fs_reclaim ->clients.xa_lock FD: 2 BD: 10 +.+.: clients.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: (blocking_lsm_notifier_chain).rwsem FD: 149 BD: 67 ++++: (inetaddr_chain).rwsem ->fs_reclaim ->&c->lock ->pool_lock#2 ->pcpu_alloc_mutex ->&____s->seqcount ->fib_info_lock ->&dir->lock#2 ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&net->sctp.local_addr_lock ->&rq->__lock ->rlock-AF_NETLINK ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->quarantine_lock ->&ipvlan->addrs_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&meta->lock ->&dir->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&rcu_state.gp_wq FD: 1 BD: 11 ....: inet6addr_chain.lock FD: 1 BD: 1 +.+.: buses_mutex FD: 1 BD: 1 +.+.: offload_lock FD: 1 BD: 1 +...: inetsw_lock FD: 1 BD: 73 +.+.: ptype_lock FD: 1111 BD: 1 +.+.: (wq_completion)events_power_efficient ->(work_completion)(&(&tbl->managed_work)->work) ->(check_lifetime_work).work ->(work_completion)(&(&cache_cleaner)->work) 
->(work_completion)(&(&ops->cursor_work)->work) ->(work_completion)(&(&hub->init_work)->work) ->(work_completion)(&(&gc_work->dwork)->work) ->(work_completion)(&(&tbl->gc_work)->work) ->(gc_work).work ->(crda_timeout).work ->&rq->__lock FD: 48 BD: 2 +.+.: (work_completion)(&(&tbl->managed_work)->work) ->&tbl->lock ->&rq->__lock FD: 47 BD: 135 +.-.: &tbl->lock ->&obj_hash[i].lock ->&base->lock ->&n->lock ->pool_lock#2 ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&dir->lock#2 ->krc.lock ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->tk_core.seq.seqcount ->&n->list_lock ->&____s->seqcount#2 ->quarantine_lock ->init_task.mems_allowed_seq.seqcount FD: 83 BD: 2 +.+.: pcpu_balance_work ->pcpu_alloc_mutex FD: 32 BD: 2 +.+.: (check_lifetime_work).work ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq ->&rcu_state.gp_wq FD: 1 BD: 67 +.+.: &net->rules_mod_lock FD: 12 BD: 76 +.-.: bh_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&zone->lock ->&n->list_lock ->&____s->seqcount#2 ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 1 +.+.: tcp_ulp_list_lock FD: 1 BD: 4 +.+.: &hashinfo->lock FD: 77 BD: 70 +.-.: k-slock-AF_INET/1 ->tk_core.seq.seqcount ->&obj_hash[i].lock ->pool_lock#2 ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&base->lock ->slock-AF_INET#2 ->&c->lock ->&n->list_lock ->&msk->pm.lock FD: 1 BD: 1 +...: xfrm_state_afinfo_lock FD: 1 BD: 1 +.+.: xfrm_policy_afinfo_lock FD: 1 BD: 1 +...: xfrm_input_afinfo_lock FD: 1 BD: 1 +.+.: tcp_cong_list_lock FD: 1 BD: 1 +.+.: mptcp_sched_list_lock FD: 43 BD: 25 +.+.: &net->unx.table.locks[i] ->&dentry->d_lock FD: 2 BD: 7 +.+.: cache_list_lock ->&cd->hash_lock FD: 30 BD: 2 +.+.: (work_completion)(&(&cache_cleaner)->work) ->cache_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 1 +.+.: (rpc_pipefs_notifier_list).rwsem FD: 1 BD: 1 +.+.: svc_xprt_class_lock FD: 46 BD: 1 +.+.: xprt_list_lock ->(console_sem).lock FD: 185 BD: 3 ++++: umhelper_sem ->usermodehelper_disabled_waitq.lock ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->subsys mutex#73 ->fw_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&x->wait#22 ->&base->lock ->&rq->__lock ->(&timer.timer) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount FD: 1 BD: 4 ....: usermodehelper_disabled_waitq.lock FD: 35 BD: 749 +.+.: &dentry->d_lock/1 ->&dentry->d_lock/2 ->&p->pi_lock FD: 141 BD: 3 .+.+: sb_writers#2 ->mount_lock ->&sb->s_type->i_mutex_key/1 ->&sb->s_type->i_mutex_key FD: 135 BD: 4 +.+.: &sb->s_type->i_mutex_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&____s->seqcount ->&c->lock ->&sb->s_type->i_mutex_key FD: 1 BD: 2 +.+.: tomoyo_log_lock FD: 1 BD: 2 ....: tomoyo_log_wait.lock FD: 81 BD: 362 +.+.: &wb->list_lock ->&sb->s_type->i_lock_key#2 ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#6 ->&sb->s_type->i_lock_key#25 ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#34 ->&sb->s_type->i_lock_key#36 ->&sb->s_type->i_lock_key#35 ->&sb->s_type->i_lock_key#38 
->&sb->s_type->i_lock_key#41 ->&sb->s_type->i_lock_key#42 ->&sb->s_type->i_lock_key#28 ->&sb->s_type->i_lock_key#32 ->&sb->s_type->i_lock_key#45 ->&sb->s_type->i_lock_key#44 ->&sb->s_type->i_lock_key#50 ->&sb->s_type->i_lock_key#49 FD: 199 BD: 2 +.+.: (work_completion)(&sub_info->work) ->&sighand->siglock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->&c->lock ->init_files.file_lock ->init_fs.lock ->&p->alloc_lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->input_pool.lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&sig->wait_chldexit ->tasklist_lock ->&prev->lock ->&(&sig->stats_lock)->lock ->&x->wait#17 ->&vn->pool_lock ->&n->list_lock FD: 1 BD: 1 +.+.: umh_sysctl_lock FD: 1 BD: 3 +.+.: &drv->dynids.lock FD: 439 BD: 2 +.+.: &tsk->futex_exit_mutex ->&p->pi_lock ->&rq->__lock ->&mm->mmap_lock FD: 28 BD: 1 +.+.: &child->perf_event_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 223 ....: &pid->wait_pidfd FD: 29 BD: 221 ....: &sig->wait_chldexit ->&p->pi_lock FD: 33 BD: 221 ....: &(&sig->stats_lock)->lock ->&____s->seqcount#4 FD: 32 BD: 222 ....: &____s->seqcount#4 ->&pid->wait_pidfd ->pidmap_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#25 FD: 199 BD: 1 +.+.: subsys mutex#26 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->platform_devid_ida.xa_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 ->wakeup_ida.xa_lock ->gdp_mutex ->subsys mutex#12 ->events_lock ->rtcdev_lock FD: 1 BD: 1 +.+.: subsys mutex#27 FD: 1 BD: 142 +.+.: subsys mutex#28 FD: 1 BD: 19 +.+.: key_user_lock FD: 1 BD: 21 +.+.: key_serial_lock FD: 6 BD: 20 +.+.: key_construction_mutex ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock ->root_key_user.lock FD: 85 BD: 18 +.+.: &type->lock_class ->keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->key_user_lock ->&____s->seqcount ->&c->lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->ima_keys_lock ->&obj_hash[i].lock ->root_key_user.lock FD: 81 BD: 19 +.+.: keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->root_key_user.lock ->key_construction_mutex FD: 1 BD: 1 ....: &pgdat->kswapd_wait FD: 1 BD: 1 +.+.: drivers_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#20/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&____s->seqcount ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#18 ->&dentry->d_lock FD: 59 BD: 6 +.+.: &sb->s_type->i_lock_key#18 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 119 BD: 1 +.+.: &type->s_umount_key#21/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&dentry->d_lock FD: 43 BD: 8 +.+.: &sb->s_type->i_lock_key#19 ->&dentry->d_lock FD: 1 BD: 1 +.+.: configfs_subsystem_mutex FD: 119 BD: 1 +.+.: &sb->s_type->i_mutex_key#6/1 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->configfs_dirent_lock 
->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&default_group_class[depth - 1]/2 ->&sb->s_type->i_mutex_key#7/2 ->&default_group_class[depth - 1]#3 FD: 1 BD: 9 +.+.: configfs_dirent_lock FD: 116 BD: 2 +.+.: &default_group_class[depth - 1]/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&default_group_class[depth - 1]#4/2 ->&c->lock FD: 78 BD: 1 +.+.: ecryptfs_daemon_hash_mux ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: ecryptfs_msg_ctx_lists_mux ->&ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 2 +.+.: &ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 1 ....: &ecryptfs_kthread_ctl.wait FD: 1 BD: 1 +.+.: nfs_version_lock FD: 104 BD: 1 ++++: key_types_sem ->(console_sem).lock ->asymmetric_key_parsers_sem ->&type->lock_class ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim FD: 1 BD: 1 +.+.: pnfs_spinlock FD: 1 BD: 5 +.+.: &sn->pipefs_sb_lock FD: 1 BD: 5 +.+.: nls_lock FD: 35 BD: 1 ..-.: &(&cache_cleaner)->timer FD: 44 BD: 1 +...: put_task_map-wait-type-override ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&base->lock ->task_group_lock ->stock_lock ->percpu_counters_lock ->pcpu_lock ->css_set_lock FD: 14 BD: 1 +.-.: (&tcp_orphan_timer) ->&obj_hash[i].lock ->&base->lock FD: 41 BD: 2 +.+.: (work_completion)(&p->wq) ->&vn->busy.lock ->&obj_hash[i].lock ->&vn->lazy.lock ->pool_lock#2 ->&rq->__lock ->&base->lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: jffs2_compressor_list_lock FD: 1 BD: 1 +.+.: next_tag_value_lock FD: 1 BD: 1 ....: log_redrive_lock FD: 30 BD: 14 ..-.: &TxAnchor.LazyLock ->jfs_commit_thread_wait.lock FD: 29 BD: 15 ..-.: jfs_commit_thread_wait.lock ->&p->pi_lock FD: 3 BD: 8 +.+.: jfsTxnLock ->&TxBlock[k].waitor ->&TxAnchor.freelockwait FD: 46 BD: 1 +.+.: ocfs2_stack_lock ->(console_sem).lock FD: 1 BD: 1 +.+.: o2hb_callback_sem FD: 1 BD: 1 +.+.: o2net_handler_lock FD: 3 BD: 1 +.+.: subsys mutex#29 ->&k->k_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#22/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#20 ->&dentry->d_lock ->&n->list_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock FD: 43 BD: 7 +.+.: &sb->s_type->i_lock_key#20 ->&dentry->d_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#23/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&rq->__lock ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#21 ->&dentry->d_lock ->&c->lock ->&____s->seqcount FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#21 ->&dentry->d_lock FD: 1 BD: 1 +.+.: cipso_v4_doi_list_lock FD: 311 BD: 73 +.+.: nf_hook_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->stock_lock ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock ->cpu_hotplug_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 78 BD: 1 +.+.: bpf_crypto_types_sem ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 78 BD: 1 +.+.: alg_types_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: dma_list_mutex FD: 95 BD: 2 ++++: asymmetric_key_parsers_sem ->(console_sem).lock 
->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->crypto_alg_sem ->&obj_hash[i].lock ->(crypto_chain).rwsem ->&x->wait#20 ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock ->(&timer.timer) FD: 1096 BD: 1 +.+.: blkcg_pol_register_mutex ->blkcg_pol_mutex ->cgroup_mutex FD: 1 BD: 2 +.+.: elv_list_lock FD: 82 BD: 1 +.+.: crc_t10dif_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 82 BD: 1 +.+.: crc64_rocksoft_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ts_mod_lock FD: 1 BD: 1 +.+.: pci_ep_cfs_subsys.su_mutex FD: 112 BD: 1 +.+.: &default_group_class[depth - 1]#2/1 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&sb->s_type->i_mutex_key#7/2 FD: 1 BD: 3 +.+.: &sb->s_type->i_mutex_key#7/2 FD: 1 BD: 1 +.+.: pci_epf_mutex FD: 183 BD: 1 +.+.: ipmi_interfaces_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->pcpu_alloc_mutex ->wq_pool_mutex ->&base->lock ->panic_notifier_list.lock FD: 2 BD: 1 +.+.: smi_watchers_mutex ->&ipmi_interfaces_srcu FD: 1 BD: 3 .+.?: &ipmi_interfaces_srcu FD: 1 BD: 1 +.+.: smi_infos_lock FD: 4 BD: 162 ....: mask_lock ->tmp_mask_lock FD: 3 BD: 163 ....: tmp_mask_lock ->tmpmask_lock ->&its->lock FD: 29 BD: 1 ....: &desc->wait_for_threads ->&p->pi_lock FD: 3 BD: 3 +.+.: subsys mutex#30 ->&k->k_lock FD: 31 BD: 4 +.+.: &dev->mutex#2 ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->&ff->mutex FD: 169 BD: 1 +.+.: register_count_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock FD: 1 BD: 141 ....: thermal_cdev_ida.xa_lock FD: 1 BD: 141 ....: cpufreq_driver_lock FD: 3 BD: 141 +.+.: subsys mutex#31 ->&k->k_lock FD: 78 BD: 1 +.+.: scmi_requested_devices_mtx ->fs_reclaim ->pool_lock#2 ->&____s->seqcount FD: 35 BD: 1 ..-.: lib/debugobjects.c:102 FD: 31 BD: 2 +.+.: (debug_obj_work).work ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->quarantine_lock ->rcu_node_0 FD: 1 BD: 1 ....: virtio_index_ida.xa_lock FD: 1 BD: 1 +.+.: subsys mutex#32 FD: 1 BD: 1 +.+.: vdpa_dev_lock FD: 125 BD: 3 ++++: &type->i_mutex_dir_key#2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&c->lock ->&____s->seqcount ->namespace_sem ->&sem->wait_lock ->&rq->__lock ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&simple_offset_lock_class ->tk_core.seq.seqcount FD: 3 BD: 1 +.+.: subsys mutex#33 ->&k->k_lock FD: 1 BD: 1 ....: rng_index_ida.xa_lock FD: 187 BD: 4 +.+.: &md->mutex ->fs_reclaim ->pool_lock#2 ->irq_domain_mutex ->pci_lock ->&xa->xa_lock#6 ->&its->dev_alloc_lock ->&domain->mutex ->&irq_desc_lock_class ->tmpmask_lock ->&its->lock ->&root->kernfs_rwsem ->lock ->&obj_hash[i].lock ->sparse_irq_lock ->&vn->busy.lock ->&vn->lazy.lock ->&c->lock ->&____s->seqcount ->&rq->__lock FD: 2 BD: 5 +.+.: &xa->xa_lock#6 ->pool_lock#2 FD: 80 BD: 8 +.+.: &its->dev_alloc_lock ->&its->lock ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->lpi_range_lock ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 164 ....: tmpmask_lock FD: 1 BD: 162 ....: &gic_data_rdist()->rd_lock FD: 1 BD: 1 +.+.: &dev->vqs_list_lock FD: 1 BD: 1 ....: &vp_dev->lock FD: 81 BD: 1 +.+.: rng_mutex ->&x->wait#13 ->fs_reclaim 
->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&x->wait ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 2 ....: &x->wait#13 FD: 29 BD: 2 -.-.: &x->wait#14 ->&p->pi_lock FD: 1 BD: 1 ....: &dev->config_lock FD: 36 BD: 1 +.+.: reading_mutex ->&x->wait#14 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 FD: 2 BD: 1 +.-.: drivers/char/random.c:1026 ->input_pool.lock FD: 1 BD: 1 ....: &dev->managed.lock FD: 119 BD: 1 +.+.: &type->s_umount_key#24/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&____s->seqcount ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#22 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#22 ->&dentry->d_lock FD: 2 BD: 399 ....: drm_minor_lock ->pool_lock#2 FD: 3 BD: 3 +.+.: subsys mutex#34 ->&k->k_lock FD: 79 BD: 43 +.+.: &dev->mode_config.idr_mutex ->fs_reclaim ->pool_lock#2 ->&file->master_lookup_lock FD: 457 BD: 39 +.+.: crtc_ww_class_acquire ->crtc_ww_class_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&mm->mmap_lock ->&obj_hash[i].lock ->&c->lock FD: 119 BD: 40 +.+.: crtc_ww_class_mutex ->reservation_ww_class_acquire ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.blob_lock ->&crtc->commit_lock ->reservation_ww_class_mutex ->&dev->mode_config.panic_lock ->tk_core.seq.seqcount ->&vkms_out->lock ->&dev->vbl_lock ->&x->wait#15 ->&base->lock ->&rq->__lock ->(&timer.timer) ->&file->master_lookup_lock ->&file_private->table_lock ->&dev->mode_config.fb_lock ->&n->list_lock ->&dev->event_lock ->(worker)->lock ->&x->wait ->&mgr->vm_lock FD: 1 BD: 41 +.+.: &dev->mode_config.blob_lock FD: 1 BD: 1 ....: &xa->xa_lock#7 FD: 1 BD: 1 ....: &xa->xa_lock#8 FD: 1 BD: 42 ....: &dev->mode_config.connector_list_lock FD: 18 BD: 44 ..-.: &dev->vbl_lock ->&dev->vblank_time_lock ->&vblank->queue FD: 218 BD: 1 .+.+: drm_connector_list_iter ->&dev->mode_config.connector_list_lock ->fs_reclaim ->pool_lock#2 ->&connector->mutex ->&file->master_lookup_lock ->&c->lock FD: 216 BD: 2 +.+.: &connector->mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#34 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&dev->mode_config.idr_mutex ->&rq->__lock ->connector_list_lock FD: 1 BD: 3 +.+.: connector_list_lock FD: 1 BD: 1 +.+.: &dev->filelist_mutex FD: 498 BD: 1 +.+.: &dev->clientlist_mutex ->&helper->lock ->registration_lock ->(console_sem).lock ->kernel_fb_helper_lock FD: 466 BD: 35 +.+.: &helper->lock ->fs_reclaim ->pool_lock#2 ->&client->modeset_mutex ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_lock_key ->&mgr->vm_lock ->&dev->object_name_lock ->&node->vm_lock ->&file_private->table_lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.fb_lock ->&file->fbs_lock ->&prime_fpriv->lock ->reservation_ww_class_mutex ->&____s->seqcount ->&dev->master_mutex FD: 459 BD: 37 +.+.: &client->modeset_mutex ->&dev->mode_config.mutex ->crtc_ww_class_acquire FD: 458 BD: 38 +.+.: 
&dev->mode_config.mutex ->crtc_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 41 +.+.: &mgr->vm_lock ->pool_lock#2 FD: 99 BD: 36 +.+.: &dev->object_name_lock ->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 9 BD: 400 +.+.: &file_private->table_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 4 BD: 36 +.+.: &node->vm_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 41 +.+.: &dev->mode_config.fb_lock FD: 1 BD: 36 +.+.: &file->fbs_lock FD: 1 BD: 36 +.+.: &prime_fpriv->lock FD: 45 BD: 966 -.-.: &xa->xa_lock#9 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->key#11 ->&s->s_inode_wblist_lock ->&base->lock ->key#12 ->&wb->work_lock ->&pl->lock ->&xa->xa_lock#5 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->key#13 ->&memcg->deferred_split_queue.split_queue_lock ->&lruvec->lru_lock FD: 1 BD: 166 +.+.: &info->lock FD: 4 BD: 863 +.+.: lock#4 ->&lruvec->lru_lock ->&obj_hash[i].lock FD: 1 BD: 965 ....: &lruvec->lru_lock FD: 496 BD: 2 +.+.: registration_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#9 ->vt_switch_mutex ->(console_sem).lock ->console_lock FD: 78 BD: 3 +.+.: vt_switch_mutex ->fs_reclaim ->pool_lock#2 FD: 15 BD: 35 +.+.: &fb_info->lock ->&(&info->deferred_work)->timer ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 35 ....: vt_event_lock FD: 460 BD: 36 +.+.: &dev->master_mutex ->&client->modeset_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&file->master_lookup_lock ->&dev->mode_config.idr_mutex ->&obj_hash[i].lock FD: 1 BD: 41 +.+.: &crtc->commit_lock FD: 1 BD: 41 ....: &dev->mode_config.panic_lock FD: 37 BD: 41 -.-.: &vkms_out->lock ->&dev->event_lock FD: 36 BD: 42 -.-.: &dev->event_lock ->&dev->vbl_lock ->&____s->seqcount#5 ->&x->wait#15 ->&obj_hash[i].lock ->pool_lock#2 ->&dev->vblank_time_lock ->&vblank->queue ->&base->lock ->&vblank->work_wait_queue FD: 1 BD: 47 ----: &____s->seqcount#5 FD: 29 BD: 43 -...: &x->wait#15 ->&p->pi_lock FD: 16 BD: 45 -.-.: &dev->vblank_time_lock ->tk_core.seq.seqcount ->&(&vblank->seqlock)->lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 2 BD: 46 -.-.: &(&vblank->seqlock)->lock ->&____s->seqcount#5 FD: 1 BD: 38 ....: &helper->damage_lock FD: 2 BD: 2 +.+.: (work_completion)(&helper->damage_work) ->&helper->damage_lock FD: 1 BD: 45 -.-.: &vblank->queue FD: 1 BD: 2 +.+.: kernel_fb_helper_lock FD: 1 BD: 1 +.+.: drivers_lock#2 FD: 1 BD: 1 +.+.: devices_lock FD: 1 BD: 9 ....: blk_queue_ida.xa_lock FD: 2 BD: 31 +.+.: &xa->xa_lock#10 ->pool_lock#2 FD: 13 BD: 416 ....: &q->queue_lock ->&blkcg->lock ->&c->lock ->pool_lock#2 ->pcpu_lock ->&obj_hash[i].lock ->&n->list_lock FD: 2 BD: 417 ....: &blkcg->lock ->pool_lock#2 FD: 1 BD: 35 +.+.: &bdev->bd_size_lock FD: 3 BD: 31 +.+.: subsys mutex#35 ->&k->k_lock FD: 327 BD: 9 +.+.: &q->sysfs_dir_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&q->sysfs_lock ->&obj_hash[i].lock FD: 326 BD: 10 +.+.: &q->sysfs_lock ->&q->debugfs_mutex ->&q->unused_hctx_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->cpu_hotplug_lock ->fs_reclaim ->&xa->xa_lock#11 ->pcpu_alloc_mutex ->&q->rq_qos_mutex ->&stats->lock ->&c->lock ->&____s->seqcount 
->&rq->__lock ->&cfs_rq->removed.lock ->lock ->&root->kernfs_rwsem FD: 160 BD: 12 +.+.: &q->debugfs_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 30 BD: 60 ..-.: percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 3 BD: 12 +.+.: subsys mutex#36 ->&k->k_lock FD: 1 BD: 13 ....: cgwb_lock FD: 1 BD: 12 +...: bdi_lock FD: 86 BD: 406 +.+.: inode_hash_lock ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#25 ->&sb->s_type->i_lock_key#31 ->&sb->s_type->i_lock_key#32 ->&sb->s_type->i_lock_key#34 ->&sb->s_type->i_lock_key#36 ->&sb->s_type->i_lock_key#37 ->&sb->s_type->i_lock_key#38 ->&sb->s_type->i_lock_key#39 ->&sb->s_type->i_lock_key#40 ->&sb->s_type->i_lock_key#41 ->&sb->s_type->i_lock_key#42 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#43 ->&sb->s_type->i_lock_key#45 ->&sb->s_type->i_lock_key#47 ->&sb->s_type->i_lock_key#48 ->&sb->s_type->i_lock_key#49 ->&sb->s_type->i_lock_key#50 ->&sb->s_type->i_lock_key#51 FD: 29 BD: 29 +.+.: bdev_lock ->&bdev->bd_holder_lock ->&rq->__lock FD: 352 BD: 28 +.+.: &disk->open_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&ret->b_uptodate_lock ->&obj_hash[i].lock ->&vn->lazy.lock ->&base->lock ->&folio_wait_table[i] ->&rq->__lock ->&cfs_rq->removed.lock ->(console_sem).lock ->&s->s_inode_list_lock ->pcpu_alloc_mutex ->&bdev->bd_size_lock ->&x->wait#8 ->ext_devt_ida.xa_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#35 ->&xa->xa_lock#10 ->inode_hash_lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->bdev_lock ->rcu_node_0 ->&lo->lo_mutex ->nbd_index_mutex ->&nbd->config_lock ->&new->lock ->stock_lock ->&wb->list_lock ->&lo->lo_lock ->&q->limits_lock ->uevent_sock_mutex ->&bdev->bd_holder_lock ->&sem->wait_lock ->&____s->seqcount#2 ->kernfs_idr_lock ->&n->list_lock ->&lock->wait_lock ->quarantine_lock ->lock#2 ->&rcu_state.expedited_wq ->&vn->pool_lock ->remove_cache_srcu FD: 31 BD: 822 +.+.: &mapping->i_private_lock ->&eb->refs_lock ->&p->pi_lock ->&obj_hash[i].lock FD: 30 BD: 46 -.-.: &ret->b_uptodate_lock ->bit_wait_table + i FD: 78 BD: 1 +.+.: loop_ctl_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 11 +.+.: &q->unused_hctx_lock FD: 8 BD: 11 +.+.: &xa->xa_lock#11 ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 32 +.+.: &set->tag_list_lock FD: 1 BD: 274 ....: &sb->map[i].swap_lock FD: 165 BD: 11 +.+.: &q->rq_qos_mutex ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->debugfs_mutex ->set->srcu FD: 31 BD: 36 +.+.: &q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq FD: 29 BD: 61 ..-.: &q->mq_freeze_wq ->&p->pi_lock FD: 1 BD: 11 ....: &stats->lock FD: 326 BD: 29 +.+.: nbd_index_mutex ->fs_reclaim ->pool_lock#2 ->&nbd->config_lock FD: 1 BD: 36 .+.+: set->srcu FD: 35 BD: 1 ..-.: &(&ops->cursor_work)->timer FD: 37 BD: 2 +.+.: (work_completion)(&(&ops->cursor_work)->work) ->(console_sem).lock ->&obj_hash[i].lock ->&base->lock FD: 340 BD: 1 +.+.: zram_index_mutex ->fs_reclaim ->pool_lock#2 ->blk_queue_ida.xa_lock ->&obj_hash[i].lock ->pcpu_alloc_mutex ->bio_slab_lock ->percpu_counters_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->&xa->xa_lock#10 ->lock ->&____s->seqcount ->&x->wait#8 ->&bdev->bd_size_lock 
->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->subsys mutex#35 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->(console_sem).lock FD: 3 BD: 1 +.+.: subsys mutex#37 ->&k->k_lock FD: 79 BD: 2 +.+.: &default_group_class[depth - 1]#3 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 370 BD: 1 +.+.: &lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->&sb->map[i].swap_lock ->bio_slab_lock ->percpu_counters_lock ->&s->s_inode_list_lock ->&xa->xa_lock#10 ->lock ->&x->wait#8 ->nullb_indexes.xa_lock ->&bdev->bd_size_lock ->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->subsys mutex#35 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->bdev_lock ->&sb->s_type->i_lock_key#3 ->&disk->open_mutex ->(console_sem).lock FD: 1 BD: 2 ....: nullb_indexes.xa_lock FD: 1 BD: 1 ....: nfc_index_ida.xa_lock FD: 176 BD: 3 +.+.: nfc_devlist_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->running_helpers_waitq.lock ->subsys mutex#38 ->&k->k_lock ->&genl_data->genl_data_mutex FD: 3 BD: 4 +.+.: subsys mutex#38 ->&k->k_lock FD: 4 BD: 5 +.+.: llcp_devices_lock ->&k->list_lock ->&k->k_lock FD: 1 BD: 74 ....: &rfkill->lock FD: 30 BD: 28 +.+.: subsys mutex#39 ->&k->k_lock ->&rq->__lock FD: 179 BD: 2 +.+.: (work_completion)(&rfkill->sync_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 2 BD: 1 +.+.: dma_heap_minors.xa_lock ->pool_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#40 ->&k->k_lock FD: 1 BD: 1 +.+.: heap_list_lock FD: 4 BD: 1 +.+.: subsys mutex#41 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: nvmf_hosts_mutex FD: 3 BD: 1 +.+.: subsys mutex#42 ->&k->k_lock FD: 1 BD: 1 +.+.: nvmf_transports_rwsem FD: 3 BD: 1 +.+.: subsys mutex#43 ->&k->k_lock FD: 1 BD: 3 ....: nvme_instance_ida.xa_lock FD: 115 BD: 3 +.+.: &default_group_class[depth - 1]#4/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&default_group_class[depth - 1]#5/2 FD: 1 BD: 1 +.+.: nvmet_config_sem FD: 3 BD: 3 +.+.: subsys mutex#44 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#45 ->&k->k_lock FD: 160 BD: 3 +.+.: pools_reg_lock ->pools_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 4 +.+.: pools_lock FD: 114 BD: 4 +.+.: &default_group_class[depth - 1]#5/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&default_group_class[depth - 1]#6/2 FD: 113 BD: 5 
+.+.: &default_group_class[depth - 1]#6/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#19 ->&default_group_class[depth - 1]#7 ->&default_group_class[depth - 1]#7/2 FD: 79 BD: 6 +.+.: &default_group_class[depth - 1]#7 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 1 BD: 6 +.+.: &default_group_class[depth - 1]#7/2 FD: 1 BD: 1 +.+.: backend_mutex FD: 1 BD: 1 +.+.: scsi_mib_index_lock FD: 1 BD: 1 +.+.: hba_lock FD: 78 BD: 1 +.+.: device_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: &hba->device_lock FD: 2 BD: 3 ....: &ctrl->lock ->&ctrl->state_wq FD: 1 BD: 4 ....: &ctrl->state_wq FD: 1 BD: 8 +.+.: &hctx->lock FD: 1 BD: 213 +.+.: &nvmeq->sq_lock FD: 29 BD: 6 ..-.: &x->wait#16 ->&p->pi_lock FD: 175 BD: 6 +.+.: nvme_subsystems_lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->subsys mutex#46 FD: 3 BD: 7 +.+.: subsys mutex#46 ->&k->k_lock FD: 1 BD: 1 +.+.: part_parser_lock FD: 1 BD: 7 +.+.: &xa->xa_lock#12 FD: 349 BD: 1 +.+.: mtd_table_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#47 ->devtree_lock ->nvmem_ida.xa_lock ->nvmem_cell_mutex ->&k->k_lock ->subsys mutex#48 ->nvmem_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(console_sem).lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->&sb->map[i].swap_lock ->bio_slab_lock ->percpu_counters_lock ->&s->s_inode_list_lock ->&xa->xa_lock#10 ->&bdev->bd_size_lock ->elv_list_lock ->&q->mq_freeze_lock ->set->srcu ->percpu_ref_switch_lock ->&q->debugfs_mutex ->subsys mutex#35 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->subsys mutex#36 ->cgwb_lock ->bdi_lock ->inode_hash_lock FD: 28 BD: 33 +.+.: &q->limits_lock ->&rq->__lock FD: 1 BD: 1 +.+.: chip_drvs_lock FD: 190 BD: 1 +.+.: &dev->shutdown_lock ->&md->mutex ->&desc->request_mutex ->&obj_hash[i].lock ->pool_lock#2 ->pci_lock ->fs_reclaim ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->&rq->__lock ->&cfs_rq->removed.lock ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&c->lock FD: 1 BD: 162 ....: irq_resend_lock FD: 1 BD: 165 +.+.: &ent->pde_unload_lock FD: 1 BD: 71 ....: (kmod_concurrent_max).lock FD: 29 BD: 72 ....: &x->wait#17 ->&p->pi_lock FD: 1 BD: 6 ....: &prev->lock FD: 3 BD: 2 +.+.: subsys mutex#47 ->&k->k_lock FD: 1 BD: 2 ....: nvmem_ida.xa_lock FD: 1 BD: 2 +.+.: nvmem_cell_mutex FD: 1 BD: 768 ....: &sem->wait_lock FD: 1 BD: 2 +.+.: subsys mutex#48 FD: 1 BD: 2 +.+.: nvmem_mutex FD: 31 BD: 182 +.+.: (work_completion)(work) ->lock#4 ->lock#5 ->&rq->__lock FD: 1 BD: 786 +.+.: lock#5 FD: 84 BD: 1 +.+.: (wq_completion)gid-cache-wq ->(work_completion)(&ndev_work->work) ->(work_completion)(&work->work) ->&rq->__lock FD: 82 BD: 2 +.+.: (work_completion)(&ndev_work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->&base->lock 
->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 6 .+.+: &ctrl->srcu FD: 380 BD: 4 +.+.: (work_completion)(&ctrl->scan_work) ->&ctrl->scan_lock FD: 382 BD: 3 +.+.: (wq_completion)nvme-wq ->(work_completion)(&ctrl->async_event_work) ->(work_completion)(&ctrl->scan_work) ->(work_completion)(&barr->work) FD: 2 BD: 4 +.+.: (work_completion)(&ctrl->async_event_work) ->&nvmeq->sq_lock FD: 379 BD: 5 +.+.: &ctrl->scan_lock ->fs_reclaim ->pool_lock#2 ->&sb->map[i].swap_lock ->&hctx->lock ->&x->wait#16 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->&cfs_rq->removed.lock ->&ctrl->srcu ->blk_queue_ida.xa_lock ->pcpu_alloc_mutex ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->&xa->xa_lock#10 ->lock ->&x->wait#8 ->nvme_subsystems_lock ->&subsys->lock ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->limits_lock ->&bdev->bd_size_lock ->&ctrl->namespaces_lock ->&ACCESS_PRIVATE(sdp, lock) ->&x->wait#9 ->ext_devt_ida.xa_lock ->&k->list_lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#35 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->gdp_mutex ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->bdev_lock ->&sb->s_type->i_lock_key#3 ->&disk->open_mutex ->nvme_ns_chr_minor_ida.xa_lock ->chrdevs_lock ->subsys mutex#50 ->&dentry->d_lock ->quarantine_lock FD: 1 BD: 67 +.+.: &bond->stats_lock FD: 20 BD: 85 ....: lweventlist_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 FD: 29 BD: 67 ++++: (inet6addr_validator_chain).rwsem ->&rq->__lock ->rcu_node_0 FD: 28 BD: 67 ++++: (inetaddr_validator_chain).rwsem ->&rq->__lock FD: 84 BD: 6 +.+.: &subsys->lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#12 ->&obj_hash[i].lock ->pcpu_alloc_mutex FD: 1 BD: 6 +.+.: &ctrl->namespaces_lock FD: 35 BD: 7 ....: &ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&obj_hash[i].lock ->&base->lock FD: 1088 BD: 2 +.+.: (linkwatch_work).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 29 ....: ext_devt_ida.xa_lock FD: 29 BD: 800 -.-.: &folio_wait_table[i] ->&p->pi_lock FD: 3 BD: 1 +.+.: subsys mutex#49 ->&k->k_lock FD: 1 BD: 2 +.+.: gpio_lookup_lock FD: 1 BD: 1 +.+.: mdio_board_lock FD: 1 BD: 6 ....: nvme_ns_chr_minor_ida.xa_lock FD: 3 BD: 6 +.+.: subsys mutex#50 ->&k->k_lock FD: 35 BD: 1 ..-.: fs/file_table.c:469 FD: 1 BD: 1 +.+.: mode_list_lock FD: 1254 BD: 2 +.+.: (delayed_fput_work).work ->&disk->open_mutex ->&dentry->d_lock ->&dentry->d_lock/1 ->&obj_hash[i].lock ->pool_lock#2 ->&sb->s_type->i_lock_key#18 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->stock_lock ->&sb->s_type->i_mutex_key#10 ->&sb->s_type->i_lock_key#9 ->binder_deferred_lock FD: 1 BD: 1 +.+.: l3mdev_lock FD: 1 BD: 187 -.-.: &retval->lock FD: 1 BD: 67 +.+.: &priv->adminq_lock FD: 51 BD: 1 +.+.: (wq_completion)gve ->(work_completion)(&priv->service_task) FD: 50 BD: 2 +.+.: (work_completion)(&priv->service_task) ->(console_sem).lock ->&rq->__lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 1 +.+.: hnae3_common_lock FD: 3 BD: 1 +.+.: subsys mutex#51 ->&k->k_lock FD: 2 
BD: 1 +.+.: compressor_list_lock ->pool_lock#2 FD: 1 BD: 5 ....: hwsim_netgroup_ida.xa_lock FD: 39 BD: 104 +.-.: hwsim_radio_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&list->lock#16 ->&____s->seqcount#2 ->&n->list_lock ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount FD: 30 BD: 11 +.+.: subsys mutex#52 ->&k->k_lock ->&rq->__lock FD: 80 BD: 69 +.+.: param_lock ->rate_ctrl_mutex ->disk_events_mutex FD: 450 BD: 71 +.+.: &rdev->wiphy.mtx ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#53 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->reg_requests_lock ->stack_depot_init_mutex ->pcpu_alloc_mutex ->&xa->xa_lock#4 ->net_rwsem ->&x->wait#8 ->subsys mutex#19 ->&dir->lock#2 ->dev_hotplug_mutex ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->netdev_rename_lock.seqcount ->&fq->lock ->&local->iflist_mtx ->&rq->__lock ->rlock-AF_NETLINK ->&rdev->bss_lock ->lweventlist_lock ->&base->lock ->&data->mutex ->&rdev->wiphy_work_lock ->&local->filter_lock ->&tn->lock ->pool_lock ->failover_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&ndev->lock ->&wdev->event_lock ->&rdev->mgmt_registrations_lock ->(&dwork->timer) ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->mount_lock ->(&dwork->timer)#2 ->rtnl_mutex.wait_lock ->&p->pi_lock ->&list->lock#15 ->&ifibss->incomplete_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->tk_core.seq.seqcount ->hrtimer_bases.lock ->&n->list_lock ->&list->lock#2 ->remove_cache_srcu ->&lock->wait_lock ->&sta->lock ->lock#6 ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->bh_lock ->&local->ack_status_lock ->&local->queue_stop_reason_lock ->&list->lock#16 ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&wdev->pmsr_lock ->krc.lock ->&local->active_txq_lock[i] ->&x->wait#2 ->(&ifibss->timer) ->_xmit_ETHER ->(&local->dynamic_ps_timer) ->&wq->mutex ->&rnp->exp_wq[1] ->cpu_hotplug_lock ->bpf_devs_lock ->&in_dev->mc_tomb_lock ->class ->(&tbl->proxy_timer) ->&ul->lock ->&net->xdp.lock ->mirred_list_lock ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&net->ipv6.addrconf_hash_lock ->&idev->mc_query_lock ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->target_list_lock ->&dev->ethtool->rss_lock ->kernfs_idr_lock ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->(&dwork->timer)#3 ->mmu_notifier_invalidate_range_start ->&ul->lock#2 ->&sem->wait_lock ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->&ifmgd->teardown_lock ->(&ifmgd->timer) ->&rdev->beacon_registrations_lock ->&hwstats->hwsdev_list_lock ->&rcu_state.gp_wq ->stock_lock FD: 30 BD: 72 +.+.: subsys mutex#53 ->&k->k_lock ->&rq->__lock FD: 1 BD: 72 +.+.: reg_requests_lock FD: 1 BD: 28 +.+.: rfkill_global_mutex.wait_lock FD: 7 BD: 103 +.-.: &fq->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&zone->lock ->pool_lock#2 FD: 28 BD: 72 +.+.: &local->iflist_mtx ->&rq->__lock FD: 3 BD: 67 +.+.: subsys mutex#54 ->&k->k_lock FD: 79 BD: 70 +.+.: &sdata->sec_mtx ->&sec->lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 71 +...: &sec->lock FD: 1 BD: 67 +.+.: &local->iflist_mtx#2 FD: 78 BD: 1 +.+.: 
hwsim_phys_lock ->fs_reclaim ->pool_lock#2 FD: 78 BD: 1 +.+.: xdomain_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ioctl_mutex FD: 1 BD: 1 +.+.: address_handler_list_lock FD: 1 BD: 1 +.+.: card_mutex FD: 3 BD: 1 +.+.: subsys mutex#55 ->&k->k_lock FD: 29 BD: 1 ....: &x->wait#18 ->&p->pi_lock FD: 31 BD: 2 ..-.: &txlock ->&list->lock#3 ->&txwq FD: 1 BD: 3 ..-.: &list->lock#3 FD: 29 BD: 3 ..-.: &txwq ->&p->pi_lock FD: 2 BD: 1 ....: &iocq[i].lock ->&ktiowq[i] FD: 1 BD: 2 ....: &ktiowq[i] FD: 1 BD: 1 ....: rcu_read_lock_bh FD: 37 BD: 119 +.-.: noop_qdisc.q.lock ->pcpu_lock FD: 3 BD: 5 +.+.: subsys mutex#56 ->&k->k_lock FD: 259 BD: 1 +.+.: usb_bus_idr_lock ->(usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&dev->power.lock ->device_links_srcu ->&c->lock ->&____s->seqcount ->(console_sem).lock ->input_pool.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#57 ->&x->wait#8 ->&vhci_hcd->vhci->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&base->lock ->(&timer.timer) ->&lock->wait_lock FD: 2 BD: 1 +.-.: (&ipmi_timer) ->&ipmi_interfaces_srcu FD: 169 BD: 1 +.+.: table_lock ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->(console_sem).lock ->&c->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock FD: 79 BD: 5 +.+.: mon_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&mbus->lock ->&obj_hash[i].lock FD: 174 BD: 4 +.+.: usb_port_peer_mutex ->fs_reclaim ->pool_lock#2 ->devtree_lock ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&____s->seqcount ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->dev_pm_qos_mtx ->component_mutex ->device_links_srcu ->dev_pm_qos_sysfs_mtx ->&rq->__lock ->sysfs_symlink_target_lock ->device_state_lock FD: 36 BD: 29 ....: device_state_lock ->kernfs_notify_lock FD: 42 BD: 27 ..-.: hcd_root_hub_lock ->hcd_urb_list_lock ->&bh->lock ->&dev->power.lock ->device_state_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 28 -.-.: hcd_urb_list_lock FD: 1 BD: 28 ..-.: &bh->lock FD: 50 BD: 1 +.-.: (wq_completion)events_bh ->(work_completion)(&bh->bh) FD: 49 BD: 3 +.-.: (work_completion)(&bh->bh) ->&bh->lock ->lock#6 ->&x->wait#19 ->usb_kill_urb_queue.lock ->&obj_hash[i].lock ->pool_lock#2 ->&hub->irq_urb_lock FD: 12 BD: 83 ..-.: lock#6 ->kcov_remote_lock ->&kcov->lock FD: 10 BD: 169 ..-.: kcov_remote_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock FD: 29 BD: 17 -.-.: &x->wait#19 ->&p->pi_lock FD: 1 BD: 4 +.+.: set_config_lock FD: 91 BD: 4 +.+.: hcd->bandwidth_mutex ->devtree_lock ->&obj_hash[i].lock ->&x->wait#8 ->&dev->power.lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&rq->__lock ->&x->wait#19 ->&c->lock ->&____s->seqcount ->&base->lock ->(&timer.timer) ->&dum_hcd->dum->lock FD: 1 BD: 4 +.+.: &new_driver->dynids.lock FD: 43 BD: 22 -.-.: &dum_hcd->dum->lock ->hcd_root_hub_lock ->hcd_urb_list_lock ->hrtimer_bases.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 91 BD: 11 +.+.: &hub->status_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->hcd_root_hub_lock ->fs_reclaim 
->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->(&timer.timer) ->&____s->seqcount ->&vhci_hcd->vhci->lock ->&c->lock ->&cfs_rq->removed.lock ->&n->list_lock ->&queue->lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 5 +.+.: component_mutex FD: 1 BD: 4 +.+.: subsys mutex#57 FD: 93 BD: 2 +.+.: (work_completion)(&(&hub->init_work)->work) ->&rq->__lock ->&lock->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 287 BD: 1 +.+.: (wq_completion)usb_hub_wq ->(work_completion)(&hub->events) FD: 286 BD: 2 +.+.: (work_completion)(&hub->events) ->lock#6 ->&rq->__lock ->&dev->power.lock FD: 1 BD: 246 +.+.: &lock->wait_lock FD: 43 BD: 6 ..-.: &hub->irq_urb_lock ->hcd_root_hub_lock FD: 1 BD: 3 ....: (&hub->irq_urb_retry) FD: 1 BD: 10 ....: hcd_urb_unlink_lock FD: 29 BD: 13 -.-.: usb_kill_urb_queue.lock ->&p->pi_lock FD: 50 BD: 1 +.-.: (wq_completion)events_bh_highpri ->(work_completion)(&bh->bh) FD: 48 BD: 10 +.+.: udc_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&queue->lock FD: 3 BD: 1 +.+.: subsys mutex#58 ->&k->k_lock FD: 1 BD: 1 ....: gadget_id_numbers.xa_lock FD: 124 BD: 2 +.+.: (work_completion)(&gadget->work) ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 35 BD: 200 ....: kernfs_notify_lock FD: 89 BD: 2 +.+.: kernfs_notify_work ->kernfs_notify_lock ->&root->kernfs_supers_rwsem ->&rq->__lock FD: 87 BD: 7 ++++: &root->kernfs_supers_rwsem ->inode_hash_lock ->&rq->__lock FD: 1 BD: 1 +.+.: subsys mutex#59 FD: 1 BD: 1 +.+.: func_lock FD: 1 BD: 1 +.+.: g_tf_lock FD: 1 BD: 13 ....: &vhci_hcd->vhci->lock FD: 2 BD: 3 ....: input_ida.xa_lock ->pool_lock#2 FD: 28 BD: 1 +.+.: &mousedev->mutex/1 ->&rq->__lock FD: 35 BD: 4 ....: serio_event_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 115 BD: 1 +.+.: (wq_completion)events_long ->serio_event_work ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&br->gc_work)->work) ->&rq->__lock ->(work_completion)(&br->mcast_gc_work) ->(work_completion)(&(&sbi->mdb_work)->work) ->(work_completion)(&barr->work) FD: 40 BD: 2 +.+.: serio_event_work ->serio_mutex FD: 39 BD: 3 +.+.: serio_mutex ->serio_event_lock ->&k->list_lock ->&k->k_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 ....: rtc_ida.xa_lock FD: 37 BD: 1 +.+.: &rtc->ops_lock ->(efi_runtime_lock).lock ->&obj_hash[i].lock ->&x->wait#12 ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 2 ....: platform_devid_ida.xa_lock FD: 1 BD: 2 ....: rtcdev_lock FD: 78 BD: 1 +.+.: g_smscore_deviceslock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: cx231xx_devlist_mutex FD: 1 BD: 1 +.+.: em28xx_devlist_mutex FD: 1 BD: 1 ....: pvr2_context_sync_data.lock FD: 35 BD: 1 ..-.: &(&krcp->monitor_work)->timer FD: 36 BD: 2 +.+.: (work_completion)(&(&krcp->monitor_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 9 +.+.: i2c_dev_list_lock FD: 3 BD: 9 +.+.: subsys mutex#60 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#61 FD: 181 BD: 2 +.+.: dvbdev_register_lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->minor_rwsem ->&xa->xa_lock#13 ->&mdev->graph_mutex ->&____s->seqcount ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->&c->lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#62 FD: 182 BD: 
1 +.+.: frontend_mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->dvbdev_register_lock FD: 1 BD: 3 +.+.: minor_rwsem FD: 2 BD: 4 ....: &xa->xa_lock#13 ->pool_lock#2 FD: 78 BD: 5 +.+.: &mdev->graph_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 3 BD: 3 +.+.: subsys mutex#62 ->&k->k_lock FD: 1 BD: 1 ....: &dmxdev->lock FD: 1 BD: 1 +.+.: &dvbdemux->mutex FD: 1 BD: 1 +.+.: media_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#63 FD: 180 BD: 1 +.+.: videodev_lock ->&x->wait#8 ->&obj_hash[i].lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#64 ->&xa->xa_lock#13 ->&mdev->graph_mutex FD: 3 BD: 2 +.+.: subsys mutex#64 ->&k->k_lock FD: 1 BD: 1 +.+.: vimc_sensor:389:(&vsensor->hdl)->_lock FD: 1 BD: 1 +.+.: &v4l2_dev->lock FD: 1 BD: 1 +.+.: vimc_debayer:572:(&vdebayer->hdl)->_lock FD: 1 BD: 1 +.+.: vimc_common:398:sd->active_state->lock FD: 1 BD: 1 +.+.: vimc_lens:61:(&vlens->hdl)->_lock FD: 90 BD: 1 +.+.: vivid_ctrls:1614:(hdl_user_gen)->_lock ->vivid_ctrls:1628:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1630:(hdl_vid_out)->_lock ->vivid_ctrls:1633:(hdl_vbi_cap)->_lock ->vivid_ctrls:1635:(hdl_vbi_out)->_lock ->vivid_ctrls:1638:(hdl_radio_rx)->_lock ->vivid_ctrls:1640:(hdl_radio_tx)->_lock ->vivid_ctrls:1642:(hdl_sdr_cap)->_lock ->vivid_ctrls:1644:(hdl_meta_cap)->_lock ->vivid_ctrls:1646:(hdl_meta_out)->_lock ->vivid_ctrls:1648:(hdl_tch_cap)->_lock ->&zone->lock ->&obj_hash[i].lock FD: 81 BD: 1 +.+.: vivid_ctrls:1616:(hdl_user_vid)->_lock ->vivid_ctrls:1628:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 FD: 84 BD: 1 +.+.: vivid_ctrls:1618:(hdl_user_aud)->_lock ->vivid_ctrls:1628:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1630:(hdl_vid_out)->_lock ->vivid_ctrls:1638:(hdl_radio_rx)->_lock ->vivid_ctrls:1640:(hdl_radio_tx)->_lock FD: 88 BD: 1 +.+.: vivid_ctrls:1620:(hdl_streaming)->_lock ->vivid_ctrls:1628:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1630:(hdl_vid_out)->_lock ->vivid_ctrls:1633:(hdl_vbi_cap)->_lock ->vivid_ctrls:1635:(hdl_vbi_out)->_lock ->vivid_ctrls:1642:(hdl_sdr_cap)->_lock ->vivid_ctrls:1644:(hdl_meta_cap)->_lock ->vivid_ctrls:1646:(hdl_meta_out)->_lock ->vivid_ctrls:1648:(hdl_tch_cap)->_lock FD: 82 BD: 1 +.+.: vivid_ctrls:1622:(hdl_sdtv_cap)->_lock ->vivid_ctrls:1628:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1633:(hdl_vbi_cap)->_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 1 +.+.: vivid_ctrls:1624:(hdl_loop_cap)->_lock FD: 1 BD: 1 +.+.: vivid_ctrls:1626:(hdl_fb)->_lock FD: 3 BD: 6 +.+.: vivid_ctrls:1628:(hdl_vid_cap)->_lock ->hdmi_output_skip_mask_lock ->svid_output_skip_mask_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1630:(hdl_vid_out)->_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1633:(hdl_vbi_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1635:(hdl_vbi_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1638:(hdl_radio_rx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1640:(hdl_radio_tx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1642:(hdl_sdr_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1644:(hdl_meta_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1646:(hdl_meta_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1648:(hdl_tch_cap)->_lock FD: 1 BD: 1 ....: &adap->kthread_waitq FD: 1 BD: 1 +.+.: &dev->cec_xfers_slock FD: 1 BD: 
7 +.+.: hdmi_output_skip_mask_lock FD: 1 BD: 1 ....: &dev->kthread_waitq_cec FD: 1 BD: 7 +.+.: svid_output_skip_mask_lock FD: 1 BD: 1 +.+.: cec_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#65 FD: 5 BD: 1 +.+.: &adap->lock ->tk_core.seq.seqcount ->&adap->devnode.lock_fhs FD: 1 BD: 2 +.+.: &adap->devnode.lock_fhs FD: 1 BD: 1 +.+.: ptp_clocks_map.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#66 ->&k->k_lock FD: 1 BD: 1 +.+.: pers_lock FD: 1 BD: 1 +.+.: _lock FD: 1 BD: 3 +.+.: dm_bufio_clients_lock FD: 1 BD: 1 +.+.: _ps_lock FD: 1 BD: 1 +.+.: _lock#2 FD: 1 BD: 1 +.+.: _lock#3 FD: 1 BD: 1 +.+.: register_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#67 ->&k->k_lock FD: 1 BD: 1 .+.+: bp_lock FD: 3 BD: 1 +.+.: subsys mutex#68 ->&k->k_lock FD: 17 BD: 1 +.-.: (&dsp_spl_tl) ->dsp_lock FD: 16 BD: 2 ..-.: dsp_lock ->iclock_lock ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 3 ...-: iclock_lock ->tk_core.seq.seqcount FD: 79 BD: 67 +.+.: lock#7 ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#15 FD: 1 BD: 1 ....: iscsi_transport_lock FD: 3 BD: 1 +.+.: subsys mutex#69 ->&k->k_lock FD: 1 BD: 1 ....: &tx_task->waiting FD: 1 BD: 1 +.+.: link_ops_rwsem FD: 1 BD: 1 +.+.: disable_lock FD: 2 BD: 1 +.+.: scmi_protocols.xa_lock ->pool_lock#2 FD: 99 BD: 1 +.+.: psinfo_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->(console_sem).lock ->pstore_sb_lock ->dump_list_lock FD: 83 BD: 3 +.+.: pstore_sb_lock ->&sb->s_type->i_mutex_key#12 FD: 1 BD: 2 ....: dump_list_lock FD: 1 BD: 1 +.+.: vsock_register_mutex FD: 1 BD: 1 +.+.: comedi_drivers_list_lock FD: 163 BD: 1 +.+.: cscfg_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->&k->list_lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&dev->power.lock ->dpm_list_mtx FD: 1 BD: 750 +.+.: icc_bw_lock FD: 3 BD: 6 +.+.: subsys mutex#70 ->&k->k_lock FD: 167 BD: 2 ++++: snd_ctl_layer_rwsem ->snd_ctl_led_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->sysfs_symlink_target_lock ->&c->lock FD: 1 BD: 3 +.+.: snd_card_mutex FD: 439 BD: 1 ++++: snd_ioctl_rwsem ->&mm->mmap_lock FD: 78 BD: 2 +.+.: strings ->fs_reclaim ->pool_lock#2 ->&____s->seqcount FD: 17 BD: 4 +.+.: register_mutex ->&timer->lock ->slave_active_lock ->(&priv->tlist) ->&obj_hash[i].lock ->&base->lock FD: 177 BD: 3 +.+.: sound_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->&obj_hash[i].lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#70 ->&k->k_lock ->&cfs_rq->removed.lock FD: 190 BD: 1 +.+.: register_mutex#2 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->&obj_hash[i].lock ->register_mutex ->&c->lock ->&____s->seqcount ->sound_oss_mutex ->strings ->&entry->access ->info_mutex FD: 179 BD: 1 +.+.: register_mutex#3 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->clients_lock FD: 1 BD: 7 ....: clients_lock FD: 2 BD: 2 +.+.: &client->ports_mutex ->&client->ports_lock FD: 1 BD: 7 .+.+: &client->ports_lock FD: 197 BD: 1 +.+.: register_mutex#4 ->fs_reclaim ->pool_lock#2 ->sound_oss_mutex ->register_lock#3 ->&client->ports_mutex ->clients_lock ->&client->ports_lock ->&grp->list_mutex/1 ->&tmr->lock ->register_mutex ->queue_list_lock ->&c->lock 
->&mdev->open_mutex ->&q->midi_sleep ->&obj_hash[i].lock ->&____s->seqcount ->quarantine_lock ->&q->timer_mutex FD: 179 BD: 3 +.+.: sound_oss_mutex ->fs_reclaim ->pool_lock#2 ->sound_loader_lock ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#70 ->&c->lock ->&____s->seqcount ->&k->k_lock FD: 1 BD: 4 +.+.: sound_loader_lock FD: 81 BD: 3 .+.+: &grp->list_mutex/1 ->clients_lock ->&client->ports_lock ->register_lock#3 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 2 BD: 3 +.+.: &grp->list_mutex#2 ->&grp->list_lock FD: 1 BD: 4 ....: &grp->list_lock FD: 1 BD: 6 ....: register_lock#3 FD: 170 BD: 1 ++++: &card->controls_rwsem ->&xa->xa_lock#14 ->fs_reclaim ->&card->ctl_files_rwlock ->snd_ctl_layer_rwsem ->pool_lock#2 ->&____s->seqcount ->&c->lock ->&rq->__lock FD: 9 BD: 2 +.+.: &xa->xa_lock#14 ->pool_lock#2 ->&____s->seqcount ->&c->lock FD: 1 BD: 8 ...-: &card->ctl_files_rwlock FD: 86 BD: 2 +.+.: async_lookup_work ->fs_reclaim ->pool_lock#2 ->clients_lock ->&client->ports_lock ->register_lock#3 ->snd_card_mutex ->&x->wait#10 ->&rq->__lock ->&obj_hash[i].lock FD: 4 BD: 3 +.+.: autoload_work ->&k->list_lock ->&k->k_lock FD: 1 BD: 3 +.+.: snd_ctl_led_mutex FD: 1 BD: 1 +.+.: register_mutex#5 FD: 79 BD: 1 +.+.: client_mutex ->fs_reclaim ->pool_lock#2 ->&dev->devres_lock FD: 1 BD: 72 +.+.: failover_lock FD: 2 BD: 2 +...: llc_sap_list_lock ->pool_lock#2 FD: 78 BD: 1 +.+.: act_id_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 1 BD: 67 ++++: act_mod_lock FD: 1 BD: 1 +.+.: ife_mod_lock FD: 1 BD: 67 ++++: cls_mod_lock FD: 1 BD: 1 +.+.: ematch_mod_lock FD: 1 BD: 1 +.+.: nfnl_subsys_acct FD: 1 BD: 1 +.+.: nfnl_subsys_queue FD: 1 BD: 1 +.+.: nfnl_subsys_ulog FD: 1 BD: 5 +.+.: nf_log_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_osf FD: 28 BD: 1 +.+.: nf_sockopt_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_ctnetlink FD: 1 BD: 1 +.+.: nfnl_subsys_ctnetlink_exp FD: 28 BD: 5 +.+.: nf_ct_ecache_mutex ->&rq->__lock FD: 29 BD: 1 +.+.: nfnl_subsys_cttimeout ->rcu_node_0 ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_cthelper FD: 1 BD: 1 +.+.: nf_ct_helper_mutex FD: 1 BD: 1 +...: nf_conntrack_expect_lock FD: 1 BD: 1 +.+.: nf_ct_nat_helpers_mutex FD: 122 BD: 1 +.+.: nfnl_subsys_nftables ->&nft_net->commit_mutex ->rcu_node_0 ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_nftcompat FD: 1199 BD: 1 +.+.: masq_mutex ->pernet_ops_rwsem ->(inetaddr_chain).rwsem ->inet6addr_chain.lock FD: 441 BD: 5 +.+.: &xt[i].mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->&per_cpu(xt_recseq, i) ->&obj_hash[i].lock ->&vn->lazy.lock ->&c->lock ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->&lock->wait_lock ->&vn->pool_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->init_mm.page_table_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 28 BD: 88 +.+.: &tn->lock ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#71 ->&k->k_lock FD: 81 BD: 1 +.+.: nfnl_subsys_ipset ->nlk_cb_mutex-NETFILTER ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: ip_set_type_mutex FD: 83 BD: 5 +.+.: ipvs->est_mutex ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock ->&____s->seqcount ->pcpu_lock ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: 
ip_vs_sched_mutex FD: 78 BD: 5 +.+.: __ip_vs_app_mutex ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->&obj_hash[i].lock ->&rq->__lock FD: 1 BD: 1 +.+.: ip_vs_pe_mutex FD: 1 BD: 1 +.+.: tunnel4_mutex FD: 1 BD: 1 +.+.: xfrm4_protocol_mutex FD: 1 BD: 1 +...: xfrm_km_lock FD: 1 BD: 1 +.+.: xfrm6_protocol_mutex FD: 1 BD: 1 +.+.: tunnel6_mutex FD: 1 BD: 1 +.+.: xfrm_if_cb_lock FD: 1 BD: 1 +...: inetsw6_lock FD: 1 BD: 12 +.+.: &hashinfo->lock#2 FD: 19 BD: 5 +.+.: &net->ipv6.ip6addrlbl_table.lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 170 BD: 75 +.+.: &idev->mc_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&dev_addr_list_lock_key ->&dev_addr_list_lock_key#2 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&dev_addr_list_lock_key#3 ->batched_entropy_u32.lock ->&base->lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->krc.lock ->&n->list_lock ->&dev_addr_list_lock_key#5 ->&dev_addr_list_lock_key#6 ->&dev_addr_list_lock_key#7 ->rcu_node_0 ->&batadv_netdev_addr_lock_key ->&dev_addr_list_lock_key#8 ->&dev_addr_list_lock_key#9 ->&macvlan_netdev_addr_lock_key ->&dev_addr_list_lock_key#10 ->&dev_addr_list_lock_key#11 ->&dev_addr_list_lock_key#6/1 ->&dev_addr_list_lock_key/1 ->remove_cache_srcu ->&dev_addr_list_lock_key#7/1 ->_xmit_ETHER/1 ->&batadv_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#9/1 ->&macvlan_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#10/1 ->&macsec_netdev_addr_lock_key/1 ->&____s->seqcount#2 ->&rcu_state.gp_wq ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->mmu_notifier_invalidate_range_start ->&lock->wait_lock ->&dev_addr_list_lock_key#16 ->&dev_addr_list_lock_key/2 FD: 19 BD: 77 +...: &dev_addr_list_lock_key ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 19 BD: 76 +...: &dev_addr_list_lock_key#2 ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 38 BD: 76 +...: _xmit_ETHER ->&c->lock ->&____s->seqcount ->&local->filter_lock ->&rdev->wiphy_work_lock ->pool_lock#2 ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 20 BD: 76 +...: &dev_addr_list_lock_key#3 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 1090 BD: 1 +.+.: (wq_completion)ipv6_addrconf ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->(work_completion)(&(&ifa->dad_work)->work) ->&rq->__lock FD: 1088 BD: 2 +.+.: (work_completion)(&(&net->ipv6.addr_chk_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 29 BD: 71 ....: &x->wait#20 ->&p->pi_lock FD: 55 BD: 89 ++--: &ndev->lock ->&ifa->lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dir->lock#2 ->pcpu_lock ->&obj_hash[i].lock ->&tb->tb6_lock ->&n->list_lock ->batched_entropy_u32.lock ->&base->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 3 BD: 1 +.+.: stp_proto_mutex ->llc_sap_list_lock FD: 1 BD: 1 ....: switchdev_notif_chain.lock FD: 28 BD: 67 ++++: (switchdev_blocking_notif_chain).rwsem ->&rq->__lock FD: 1 BD: 1 +.+.: br_ioctl_mutex FD: 315 BD: 8 +.+.: nf_ct_proto_mutex ->defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->defrag6_mutex ->&rq->__lock FD: 439 BD: 5 +.+.: ebt_mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&rq->__lock ->&n->list_lock FD: 1 BD: 1 +.+.: dsa_tag_drivers_lock FD: 1 BD: 1 +...: protocol_list_lock FD: 1 BD: 1 +...: linkfail_lock FD: 1 BD: 1 +...: rose_neigh_list_lock FD: 34 BD: 2 +.+.: drain_vmap_work ->vmap_purge_lock FD: 1 BD: 1 +.+.: 
proto_tab_lock#2 FD: 1 BD: 34 ++++: chan_list_lock FD: 1 BD: 5 +.+.: l2cap_sk_list.lock FD: 441 BD: 4 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->chan_list_lock ->&mm->mmap_lock FD: 1 BD: 7 +...: slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 1 ....: rfcomm_wq.lock FD: 1 BD: 1 +.+.: rfcomm_mutex FD: 1 BD: 1 +.+.: auth_domain_lock FD: 1 BD: 1 +.+.: registered_mechs_lock FD: 1 BD: 1 ....: atm_dev_notify_chain.lock FD: 1 BD: 1 +.+.: proto_tab_lock#3 FD: 1089 BD: 1 +.+.: vlan_ioctl_mutex ->&mm->mmap_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->vlan_ioctl_mutex.wait_lock ->&rq->__lock FD: 1 BD: 1 +.+.: rds_info_lock FD: 93 BD: 2 ++++: rds_trans_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->&id_priv->lock ->&id_priv->handler_mutex ->id_table_lock ->&x->wait#46 ->&obj_hash[i].lock FD: 1 BD: 4 ....: &id_priv->lock FD: 8 BD: 68 +.+.: &xa->xa_lock#15 ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 129 BD: 72 +.+.: k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&icsk->icsk_accept_queue.rskq_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->&base->lock ->&c->lock ->&____s->seqcount ->slock-AF_INET6 ->rcu_node_0 FD: 84 BD: 75 +.-.: k-slock-AF_INET6 ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->&dir->lock ->slock-AF_INET6 ->&msk->pm.lock ->&icsk->icsk_accept_queue.rskq_lock#2 ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->k-clock-AF_INET6 ->krc.lock ->&(&bp->lock)->lock FD: 1 BD: 97 ++..: k-clock-AF_INET6 FD: 43 BD: 94 +.-.: &tcp_hashinfo.bhash[i].lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&tcp_hashinfo.bhash2[i].lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->stock_lock ->&obj_hash[i].lock ->k-clock-AF_INET FD: 42 BD: 95 +.-.: &tcp_hashinfo.bhash2[i].lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->stock_lock ->k-clock-AF_INET ->once_lock FD: 1 BD: 76 +.+.: &h->lhash2[i].lock FD: 1 BD: 5 +...: &list->lock#4 FD: 43 BD: 74 ++..: k-clock-AF_TIPC ->&con->sub_lock FD: 97 BD: 70 +.+.: k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tn->nametbl_lock ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&dir->lock ->rcu_node_0 ->batched_entropy_u32.lock ->k-sk_lock-AF_TIPC/1 ->&c->lock ->&list->lock#17 ->&base->lock FD: 29 BD: 72 +...: k-slock-AF_TIPC ->&list->lock#17 ->k-clock-AF_TIPC FD: 41 BD: 76 +...: &tn->nametbl_lock ->pool_lock#2 ->&service->lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&nt->cluster_scope_lock ->&obj_hash[i].lock ->krc.lock FD: 39 BD: 77 +...: &service->lock ->pool_lock#2 ->&obj_hash[i].lock ->&sub->lock ->krc.lock ->&c->lock ->&n->list_lock FD: 28 BD: 72 +.+.: &pnettable->lock ->&rq->__lock FD: 28 BD: 72 +.+.: smc_ib_devices.mutex ->&rq->__lock FD: 1 BD: 1 +.+.: smc_wr_rx_hash_lock FD: 1 BD: 1 +.+.: v9fs_trans_lock FD: 1 BD: 5 +...: &this->receive_lock FD: 1 BD: 1 +...: lowpan_nhc_lock FD: 319 BD: 7 +.+.: ovs_mutex ->nf_ct_proto_mutex ->&obj_hash[i].lock ->pool_lock#2 ->net_rwsem ->&rq->__lock FD: 313 BD: 9 +.+.: defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 313 BD: 9 +.+.: defrag6_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 19 
+.+.: ima_keys_lock FD: 81 BD: 140 +.+.: scomp_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->init_mm.page_table_lock ->&c->lock FD: 1 BD: 141 +.+.: subsys mutex#72 FD: 4 BD: 716 +.+.: &mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 49 BD: 731 +.+.: ptlock_ptr(ptdesc)#2 ->lock#4 ->key ->&____s->seqcount ->lock#5 ->&folio_wait_table[i] ->&mapping->i_private_lock ->&lruvec->lru_lock ->&((cluster_info + ci)->lock)#2 ->mmlist_lock ->&xa->xa_lock#28 ->&pool->migrate_lock ->&class->lock ->&obj_hash[i].lock ->pool_lock#2 ->&cache->free_lock ->stock_lock FD: 372 BD: 5 +.+.: k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&rx->incoming_lock ->&obj_hash[i].lock ->&____s->seqcount ->&rxnet->conn_lock ->&call->waitq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&base->lock ->&list->lock#22 ->quarantine_lock FD: 1 BD: 6 +...: k-slock-AF_RXRPC FD: 359 BD: 6 +.+.: &rxnet->local_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->crngs.lock ->mmu_notifier_invalidate_range_start ->&c->lock ->&dir->lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->cpu_hotplug_lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&x->wait#21 ->&n->list_lock ->&____s->seqcount#2 ->key ->pcpu_lock ->percpu_counters_lock FD: 7 BD: 76 +...: &table->hash[i].lock ->k-clock-AF_INET6 ->&table->hash2[i].lock ->k-clock-AF_INET ->clock-AF_INET ->clock-AF_INET6 FD: 1 BD: 77 +...: &table->hash2[i].lock FD: 317 BD: 2 +.+.: netstamp_work ->cpu_hotplug_lock FD: 29 BD: 7 ....: &x->wait#21 ->&p->pi_lock FD: 1 BD: 6 +.+.: &local->services_lock FD: 1 BD: 11 +.+.: &rxnet->conn_lock FD: 1 BD: 6 ....: &call->waitq FD: 1 BD: 6 +.+.: &rx->call_lock FD: 1 BD: 6 +.+.: &rxnet->call_lock FD: 86 BD: 1 +.+.: init_user_ns.keyring_sem ->key_user_lock ->root_key_user.lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->key_serial_lock ->key_construction_mutex ->&type->lock_class ->keyring_serialise_link_lock FD: 1 BD: 23 ..-.: root_key_user.lock FD: 1 BD: 24 +.+.: keyring_name_lock FD: 1 BD: 1 +.+.: template_list FD: 1 BD: 1 +.+.: idr_lock FD: 35 BD: 5 +.-.: (&rxnet->peer_keepalive_timer) FD: 32 BD: 8 +.+.: (wq_completion)krxrpcd ->(work_completion)(&rxnet->peer_keepalive_work) ->(work_completion)(&rxnet->service_conn_reaper) ->&rq->__lock FD: 29 BD: 9 +.+.: (work_completion)(&rxnet->peer_keepalive_work) ->&rxnet->peer_hash_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 10 +.+.: &rxnet->peer_hash_lock FD: 78 BD: 1 +.+.: ima_extend_list_mutex ->fs_reclaim ->pool_lock#2 FD: 29 BD: 4 +.+.: deferred_probe_work ->deferred_probe_mutex FD: 81 BD: 67 ++++: &(&net->nexthop.notifier_chain)->rwsem ->&rq->__lock ->&data->nh_lock FD: 491 BD: 71 +.+.: k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->pool_lock#2 ->&obj_hash[i].lock ->stock_lock ->&hashinfo->ehash_locks[i] ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->batched_entropy_u16.lock ->fs_reclaim ->rcu_node_0 ->&rq->__lock ->&base->lock ->&ei->socket.wq.wait ->&c->lock ->&____s->seqcount ->&mm->mmap_lock ->k-clock-AF_INET ->&____s->seqcount#2 ->slock-AF_INET#2 ->&msk->pm.lock ->&n->list_lock FD: 88 BD: 75 +.-.: k-slock-AF_INET#2 ->&c->lock ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&____s->seqcount ->&base->lock ->krc.lock ->&n->list_lock ->slock-AF_INET#2 ->&tcp_hashinfo.bhash[i].lock 
->&____s->seqcount#2 ->&(&bp->lock)->lock FD: 1 BD: 97 ++..: k-clock-AF_INET FD: 1 BD: 1 ....: power_off_handler_list.lock FD: 1088 BD: 2 +.+.: reg_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 67 +...: reg_pending_beacons_lock FD: 1094 BD: 2 +.+.: (work_completion)(&fw_work->work) ->fs_reclaim ->pool_lock#2 ->&fw_cache.lock ->&____s->seqcount ->tk_core.seq.seqcount ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&cfs_rq->removed.lock ->umhelper_sem ->fw_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 8 BD: 3 +.+.: &fw_cache.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 2 BD: 705 +.+.: &____s->seqcount#6 ->&____s->seqcount#6/1 FD: 1 BD: 1 +.+.: acpi_gpio_deferred_req_irqs_lock FD: 1 BD: 1 +.+.: clk_rpm_list_lock FD: 1 BD: 1 ....: enable_lock FD: 3 BD: 4 +.+.: subsys mutex#73 ->&k->k_lock FD: 2 BD: 12 +.+.: fw_lock ->&x->wait#22 FD: 1 BD: 13 ....: &x->wait#22 FD: 1 BD: 1 +.+.: gpd_list_lock FD: 1 BD: 9 +.+.: cdev_lock FD: 1198 BD: 3 +.+.: &tty->legacy_mutex ->&tty->read_wait ->&tty->write_wait ->&tty->ldisc_sem ->&tty->files_lock ->&port->lock ->&port->mutex ->&port_lock_key ->&f->f_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 ->tasklist_lock ->fs_reclaim ->stock_lock ->tty_ldiscs_lock ->&k->list_lock ->&k->k_lock ->&xa->xa_lock#5 ->mmu_notifier_invalidate_range_start ->&c->lock ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#26 ->&tty->ctrl.lock ->devpts_mutex ->redirect_lock ->&tty->legacy_mutex/1 ->(console_sem).lock ->console_lock ->&n->list_lock FD: 31 BD: 29 ....: &tty->read_wait ->&ep->lock FD: 31 BD: 502 ....: &tty->write_wait ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&ep->lock FD: 1188 BD: 12 ++++: &tty->ldisc_sem ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&tty->write_wait ->&tty->read_wait ->&tty->termios_rwsem ->&mm->mmap_lock ->&port_lock_key ->&port->lock ->&tty->flow.lock ->&ldata->atomic_read_lock ->&o_tty->termios_rwsem/1 ->&buf->lock ->tty_ldiscs_lock ->&obj_hash[i].lock ->&tty->ldisc_sem/1 ->&vn->pool_lock ->&rq->__lock ->&____s->seqcount ->&dir->lock ->&pn->all_channels_lock ->init_mm.page_table_lock ->&c->lock ->&n->list_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->stock_lock ->pcpu_alloc_mutex ->&port->buf.lock/1 ->&x->wait#8 ->hci_index_ida.xa_lock ->wq_pool_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#74 ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem FD: 526 BD: 27 ++++: &tty->termios_rwsem ->&port->mutex ->&tty->write_wait ->&tty->read_wait ->&ldata->output_lock ->&port_lock_key ->&tty->flow.lock ->&vn->busy.lock ->&obj_hash[i].lock ->&vn->lazy.lock ->pool_lock#2 ->&rq->__lock ->(console_sem).lock ->console_lock FD: 1 BD: 15 +.+.: &tty->files_lock FD: 2 BD: 502 ....: &port->lock ->pool_lock#2 FD: 1 BD: 749 ....: &wq#2 FD: 359 BD: 1 +.+.: &type->s_umount_key#25/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock 
->&disk->open_mutex ->&obj_hash[i].lock ->wq_pool_mutex ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&wq->mutex ->wq_mayday_lock ->&p->pi_lock ->&x->wait ->rcu_node_0 ->&sbi->old_work_lock ->&x->wait#23 FD: 28 BD: 34 +.+.: &bdev->bd_holder_lock ->&rq->__lock FD: 448 BD: 2 ++++: &sig->exec_update_lock ->&p->alloc_lock ->&sighand->siglock ->&newf->file_lock ->batched_entropy_u64.lock ->&mm->mmap_lock ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 ->&rq->__lock ->pool_lock ->quarantine_lock FD: 1 BD: 2 +.+.: &sbi->old_work_lock FD: 1 BD: 37 ....: &x->wait#23 FD: 358 BD: 1 +.+.: &type->s_umount_key#26/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&____s->seqcount ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#2 ->&x->wait#23 FD: 358 BD: 1 +.+.: &type->s_umount_key#27/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#2 ->&x->wait#23 FD: 435 BD: 1 +.+.: &type->s_umount_key#28/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->pool_lock#2 ->percpu_counters_lock ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_mutex_key#8 ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&journal->j_state_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&journal->j_wait_done_commit ->&p->alloc_lock ->wq_pool_mutex ->&ei->i_es_lock ->ext4_grpinfo_slab_create_mutex ->&s->s_inode_list_lock ->ext4_li_mtx ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->console_owner_lock ->console_owner ->&dentry->d_lock ->rcu_node_0 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&xa->xa_lock#5 ->&____s->seqcount#2 ->&sbi->s_error_lock ->&base->lock ->&fq->mq_flush_lock ->&n->list_lock ->&wb->list_lock ->&wb->work_lock ->batched_entropy_u32.lock ->(&timer.timer) ->&rcu_state.expedited_wq ->&sb->map[i].swap_lock ->&rcu_state.gp_wq ->dq_list_lock ->&ei->i_data_sem/2 ->&s->s_dquot.dqio_sem ->dq_data_lock ->dq_state_lock ->key#3 ->key#4 ->&ei->i_prealloc_lock ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#3 ->&sem->wait_lock ->remove_cache_srcu ->sb_internal ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->&lock->wait_lock ->smack_known_lock ->&sbi->s_fc_lock ->pcpu_lock ->&x->wait#23 ->(&sbi->s_err_report) ->lock#2 FD: 52 BD: 1 +.+.: &bgl->locks[i].lock ->&sbi->s_md_lock ->&ei->i_prealloc_lock ->(console_sem).lock ->&sbi->s_error_lock ->&sbi->s_mb_largest_free_orders_locks[i] ->&sbi->s_mb_avg_fragment_size_locks[i] ->&pa->pa_lock#2 ->&obj_hash[i].lock 
->pool_lock#2 FD: 60 BD: 588 +.+.: &sb->s_type->i_lock_key#23 ->&dentry->d_lock ->&xa->xa_lock#9 ->&dquot->dq_dqb_lock FD: 210 BD: 14 ++++: &sb->s_type->i_mutex_key#8 ->&ei->i_es_lock ->&ei->i_data_sem ->&ei->xattr_sem ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mapping->i_private_lock ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&c->lock ->mapping.invalidate_lock ->&dentry->d_lock ->&vn->pool_lock ->&vn->busy.lock ->init_mm.page_table_lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->free_vmap_area_lock ->swap_cgroup_mutex ->&fq->mq_flush_lock ->&rq->__lock ->&x->wait#25 ->&((cluster_info + ci)->lock)/1 ->swapon_mutex ->proc_poll_wait.lock ->stock_lock ->&____s->seqcount#2 ->&n->list_lock ->&ei->i_raw_lock ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->key#34 ->bit_wait_table + i ->&lruvec->lru_lock ->rcu_node_0 ->&sbi->s_orphan_lock FD: 18 BD: 189 ++++: &ei->i_es_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&sbi->s_es_lock ->&obj_hash[i].lock ->key#2 ->key#5 ->key#6 ->key#7 ->&n->list_lock ->&____s->seqcount#2 FD: 138 BD: 183 ++++: &ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&ei->i_prealloc_lock ->remove_cache_srcu ->&sb->s_type->i_lock_key#23 ->&(ei->i_block_reservation_lock) ->&lg->lg_mutex ->&mapping->i_private_lock ->&ei->i_raw_lock ->&n->list_lock ->&wb->list_lock ->lock#4 ->&ret->b_state_lock ->&journal->j_revoke_lock ->key#14 ->&sbi->s_md_lock ->key#3 ->quarantine_lock ->&rq->__lock ->&pa->pa_lock#2 ->&xa->xa_lock#9 ->&____s->seqcount#2 ->rcu_node_0 ->&journal->j_state_lock ->stock_lock ->tk_core.seq.seqcount ->&x->wait#25 ->&__ctx->lock ->&base->lock ->(&timer.timer) ->dquot_srcu FD: 1 BD: 190 +.+.: &sbi->s_es_lock FD: 89 BD: 185 ++++: &journal->j_state_lock ->&journal->j_wait_done_commit ->&journal->j_wait_commit ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&journal->j_wait_updates ->&journal->j_wait_transaction_locked ->&journal->j_list_lock ->&journal->j_wait_reserved FD: 29 BD: 186 ....: &journal->j_wait_done_commit ->&p->pi_lock FD: 29 BD: 186 ....: &journal->j_wait_commit ->&p->pi_lock FD: 207 BD: 2 +.+.: ext4_grpinfo_slab_create_mutex ->slab_mutex ->&rq->__lock FD: 82 BD: 5 +.+.: ext4_li_mtx ->fs_reclaim ->pool_lock#2 ->batched_entropy_u16.lock ->&eli->li_list_mtx ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 1 BD: 1 ....: &rs->lock FD: 229 BD: 15 ++++: &type->i_mutex_dir_key#3 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#23 ->&ei->xattr_sem ->namespace_sem ->&c->lock ->tomoyo_ss ->&s->s_inode_list_lock ->jbd2_handle ->batched_entropy_u8.lock ->kfence_freelist_lock ->&wb->list_lock ->&n->list_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->&wb->work_lock ->batched_entropy_u32.lock ->&ei->i_raw_lock ->rcu_node_0 ->&____s->seqcount#2 ->mapping.invalidate_lock#2 ->&sb->s_type->i_lock_key#37 ->&folio_wait_table[i] ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->remove_cache_srcu ->krc.lock ->&sbi->s_orphan_lock 
->&rcu_state.expedited_wq ->(console_sem).lock ->&sbi->s_error_lock ->key#3 ->key#4 ->&fq->mq_flush_lock ->&meta_group_info[i]->alloc_sem ->key ->pcpu_lock ->percpu_counters_lock FD: 172 BD: 182 ++++: &ei->xattr_sem ->mmu_notifier_invalidate_range_start ->&mapping->i_private_lock ->pool_lock#2 ->&ret->b_state_lock ->&journal->j_revoke_lock ->tk_core.seq.seqcount ->&ei->i_raw_lock ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&rq->__lock ->rcu_node_0 ->key#3 ->key#14 ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->stock_lock ->bit_wait_table + i ->&obj_hash[i].lock ->&____s->seqcount#2 ->&c->lock ->&cache->c_list_lock ->dquot_srcu ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->&n->list_lock ->&sb->s_type->i_lock_key#3 ->&wb->work_lock ->&sb->map[i].swap_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&x->wait#25 ->&__ctx->lock ->&base->lock ->(&timer.timer) ->&rcu_state.gp_wq ->&ei->i_es_lock ->&ei->i_data_sem ->&folio_wait_table[i] ->&s->s_inode_list_lock ->&meta_group_info[i]->alloc_sem ->inode_hash_lock ->batched_entropy_u32.lock ->&ea_inode->i_rwsem#8/1 ->&ei->i_data_sem/3 ->&ei->i_prealloc_lock ->&fsnotify_mark_srcu ->quarantine_lock FD: 273 BD: 152 ++++: &vma->vm_lock->lock ->ptlock_ptr(ptdesc)#2 ->fs_reclaim ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->mapping.invalidate_lock ->&mm->page_table_lock ->&c->lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->&n->list_lock ->&rcu_state.gp_wq ->&folio_wait_table[i] ->batched_entropy_u8.lock ->kfence_freelist_lock ->&lruvec->lru_lock ->&obj_hash[i].lock ->sb_pagefaults ->&mapping->i_private_lock ->stock_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->key ->pcpu_lock ->percpu_counters_lock ->tk_core.seq.seqcount ->khugepaged_mm_lock ->khugepaged_wait.lock ->&ctx->map_changing_lock ->sb_pagefaults#2 ->sb_pagefaults#3 ->&ifs->state_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#23 ->&wb->list_lock FD: 61 BD: 714 ++++: &anon_vma->rwsem ->&mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 ->ptlock_ptr(ptdesc)#2 ->&c->lock ->&____s->seqcount ->&rq->__lock ->&sem->wait_lock ->key ->pcpu_lock ->percpu_counters_lock ->&n->list_lock ->quarantine_lock ->&cfs_rq->removed.lock ->mmu_notifier_invalidate_range_start ->stock_lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock ->ptlock_ptr(ptdesc)#3 ->&memcg->deferred_split_queue.split_queue_lock ->&lruvec->lru_lock FD: 455 BD: 1 +.+.: &sig->cred_guard_mutex ->fs_reclaim ->pool_lock#2 ->init_fs.lock ->&p->pi_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->&rq->__lock ->tomoyo_ss ->binfmt_lock ->init_binfmt_misc.entries_lock ->&dentry->d_lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#23 ->&obj_hash[i].lock ->&ei->xattr_sem ->&tsk->futex_exit_mutex ->&sig->exec_update_lock ->&fs->lock ->lock#4 ->&sb->s_type->i_mutex_key#8 ->&p->alloc_lock ->&c->lock ->&____s->seqcount ->tk_core.seq.seqcount ->&stopper->lock ->&stop_pi_lock ->&x->wait#7 ->&mm->mmap_lock ->key#8 ->&n->list_lock ->remove_cache_srcu ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&____s->seqcount#2 FD: 194 BD: 166 ++++: mapping.invalidate_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&ei->i_es_lock ->&ei->i_data_sem ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&folio_wait_table[i] ->&rq->__lock ->&n->list_lock ->&obj_hash[i].lock ->rcu_node_0 ->&mapping->i_mmap_rwsem 
->&journal->j_state_lock ->jbd2_handle ->&mapping->i_private_lock ->stock_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&sb->s_type->i_lock_key#23 ->lock#5 ->&ei->i_raw_lock ->&ei->xattr_sem ->&lruvec->lru_lock ->fs_reclaim FD: 1 BD: 2 .+.+: init_binfmt_misc.entries_lock FD: 3 BD: 93 ..-.: batched_entropy_u16.lock ->crngs.lock FD: 1 BD: 731 +.+.: ptlock_ptr(ptdesc)#2/1 FD: 1 BD: 190 ....: key#2 FD: 1 BD: 19 ..-.: task_group_lock FD: 35 BD: 1 ..-.: &(&ipvs->defense_work)->timer FD: 35 BD: 1 ..-.: &(&gc_work->dwork)->timer FD: 43 BD: 2 +.+.: (work_completion)(&(&gc_work->dwork)->work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&rcu_state.gp_wq FD: 1 BD: 143 ...-: &____s->seqcount#7 FD: 81 BD: 2 +.+.: (work_completion)(&(&ipvs->defense_work)->work) ->&s->s_inode_list_lock ->&ipvs->dropentry_lock ->&ipvs->droppacket_lock ->&ipvs->securetcp_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 3 +...: &ipvs->dropentry_lock FD: 1 BD: 3 +...: &ipvs->droppacket_lock FD: 1 BD: 3 +...: &ipvs->securetcp_lock FD: 152 BD: 1 +.+.: &type->s_umount_key#29/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_mutex_key#9 ->&dentry->d_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&rq->__lock FD: 43 BD: 369 +.+.: &sb->s_type->i_lock_key#24 ->&dentry->d_lock FD: 141 BD: 4 ++++: &sb->s_type->i_mutex_key#9 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->rename_lock.seqcount ->&c->lock ->&____s->seqcount ->proc_subdir_lock ->&p->alloc_lock ->&pid->lock ->sysctl_lock ->namespace_sem ->tomoyo_ss ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&____s->seqcount#2 ->remove_cache_srcu FD: 1137 BD: 2 +.+.: &p->lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->file_systems_lock ->&c->lock ->&____s->seqcount ->namespace_sem ->&of->mutex#2 ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->module_mutex ->&____s->seqcount#2 ->stock_lock ->&of->mutex ->rcu_node_0 ->&rcu_state.expedited_wq ->slab_mutex ->&obj_hash[i].lock ->binder_procs_lock ->&ping_table.lock ->binder_dead_nodes_lock ->&node->lock FD: 134 BD: 1 +.+.: &type->s_umount_key#30/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#25 ->crngs.lock ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 43 BD: 503 +.+.: &sb->s_type->i_lock_key#25 ->&dentry->d_lock FD: 164 BD: 3 ++++: &type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&root->kernfs_rwsem ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount ->iattr_mutex ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#25 ->namespace_sem ->tk_core.seq.seqcount ->remove_cache_srcu ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&rq->__lock ->rename_lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&xa->xa_lock#5 ->stock_lock FD: 78 BD: 319 +.+.: iattr_mutex ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount FD: 111 BD: 1 +.+.: &type->s_umount_key#31 ->&x->wait#23 
->shrinker_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&sb->s_type->i_lock_key#24 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&dentry->d_lock/1 ->&pid->lock ->&rq->__lock ->rcu_node_0 ->sysctl_lock FD: 29 BD: 260 ....: &x->wait#24 ->&p->pi_lock FD: 1 BD: 9 +.+.: rlock-AF_UNIX FD: 51 BD: 8 +.+.: &u->lock ->clock-AF_UNIX ->rlock-AF_UNIX ->&u->peer_wait ->&sk->sk_peer_lock ->&dentry->d_lock ->&ei->socket.wq.wait ->&p->pi_lock ->unix_gc_lock FD: 1233 BD: 3 +.+.: &sb->s_type->i_mutex_key#10 ->&net->unx.table.locks[i] ->&u->lock ->&u->peer_wait ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&____s->seqcount ->sk_lock-AF_INET ->slock-AF_INET#2 ->clock-AF_INET ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->clock-AF_NETLINK ->&nlk->wait ->(netlink_chain).rwsem ->sk_lock-AF_INET6 ->slock-AF_INET6 ->clock-AF_INET6 ->quarantine_lock ->&table->hash[i].lock ->&net->packet.sklist_lock ->&po->bind_lock ->sk_lock-AF_PACKET ->slock-AF_PACKET ->fanout_mutex ->&x->wait#2 ->clock-AF_PACKET ->pcpu_lock ->key ->percpu_counters_lock ->cb_lock ->genl_sk_destructing_waitq.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->hci_dev_list_lock ->stock_lock ->sk_lock-AF_TIPC ->slock-AF_TIPC ->sk_lock-AF_CAN ->&bsd_socket_locks[i] ->slock-AF_CAN ->sk_lock-AF_NFC ->slock-AF_NFC ->clock-AF_NFC ->&cfs_rq->removed.lock ->sk_lock-AF_SMC ->slock-AF_SMC ->&smc->clcsock_release_lock ->&rcu_state.expedited_wq ->rlock-AF_PACKET ->&net->ipv4.ra_mutex ->&hashinfo->lock ->sk_lock-AF_INET/1 ->&net->sctp.addr_wq_lock ->sk_lock-AF_INET6/1 ->&hashinfo->lock#2 ->&net->xdp.lock ->&xs->map_list_lock ->&xs->mutex ->clock-AF_XDP ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->sk_lock-AF_PPPOX ->slock-AF_PPPOX ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->rlock-AF_BLUETOOTH ->rlock-AF_CAIF ->sk_lock-AF_CAIF ->slock-AF_CAIF ->sk_lock-AF_KCM ->slock-AF_KCM ->&mux->lock ->&mux->rx_lock ->&knet->mutex ->krc.lock ->sk_lock-AF_X25 ->slock-AF_X25 ->raw_lock ->clock-AF_IEEE802154 ->bcm_notifier_lock ->l2tp_ip6_lock ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->clock-AF_BLUETOOTH ->sco_sk_list.lock ->&meta->lock ->kfence_freelist_lock ->clock-AF_RXRPC ->(wq_completion)krxrpcd ->&wq->mutex ->rlock-AF_RXRPC ->clock-AF_RDS ->&rs->rs_recv_lock ->rds_cong_monitor_lock ->rds_cong_lock ->&rs->rs_lock ->&rs->rs_rdma_lock ->&q->lock ->rds_sock_lock ->l2cap_sk_list.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&chan->lock/1 ->chan_list_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->raw_notifier_lock ->pfkey_mutex ->clock-AF_KEY ->&ping_table.lock ->&vn->busy.lock ->&vn->lazy.lock ->cpu_hotplug_lock ->sk_lock-AF_PHONET ->slock-AF_PHONET ->clock-AF_PHONET FD: 1 BD: 9 +...: clock-AF_UNIX FD: 29 BD: 9 +.+.: &u->peer_wait ->&p->pi_lock FD: 271 BD: 9 .+.+: sb_writers#3 ->mount_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&wb->work_lock ->&type->i_mutex_dir_key#3 ->&type->i_mutex_dir_key#3/1 ->&sem->wait_lock ->&p->pi_lock ->remove_cache_srcu ->&rq->__lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->bit_wait_table + i ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sb->s_type->i_mutex_key#8 ->tomoyo_ss ->&ei->xattr_sem ->rcu_node_0 ->quarantine_lock 
->&base->lock ->&sb->map[i].swap_lock ->&____s->seqcount#2 ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->fs_reclaim ->stock_lock ->&sb->s_type->i_lock_key#37 ->&dentry->d_lock ->crngs.lock ->&mk->mk_sem ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->&meta_group_info[i]->alloc_sem ->batched_entropy_u32.lock ->dquot_srcu ->&ei->i_raw_lock ->&sb->s_type->i_lock_key#3 ->&type->s_vfs_rename_key#3 ->krc.lock ->&rcu_state.expedited_wq ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->lock#5 FD: 431 BD: 2 .+.+: sb_writers#4 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&wb->list_lock ->&sb->s_type->i_mutex_key#9 ->sysctl_lock ->&dentry->d_lock ->tomoyo_ss ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&obj_hash[i].lock ->oom_adj_mutex ->&c->lock ->&p->pi_lock ->remove_cache_srcu ->&____s->seqcount#10 ->&(&net->ipv4.ping_group_range.lock)->lock ->&rq->__lock ->rcu_node_0 ->&p->alloc_lock FD: 1 BD: 6 +.+.: &pid->lock FD: 268 BD: 3 ++++: &type->s_umount_key#32 ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#23 ->&sbi->s_writepages_rwsem ->&sem->waiters ->&rsp->gp_wait ->&journal->j_state_lock ->&p->alloc_lock ->key#3 ->key#4 ->&sbi->s_error_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->bit_wait_table + i ->&rq->__lock ->ext4_li_mtx ->(console_sem).lock ->mount_lock ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&c->lock ->&eli->li_list_mtx ->&wb->list_lock ->&x->wait#23 ->shrinker_mutex ->rename_lock.seqcount ->&dentry->d_lock/1 ->&bdi->wb_waitq ->&wq->mutex ->&sb->s_type->i_lock_key#3 ->&____s->seqcount#2 ->lock#5 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&fq->mq_flush_lock ->&x->wait#25 ->(&timer.timer) ->&s->s_inode_list_lock ->&ei->i_es_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&root->kernfs_rwsem ->&sem->wait_lock ->&p->pi_lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->wq_mayday_lock ->&x->wait ->wq_pool_mutex ->percpu_counters_lock ->pcpu_lock ->(&sbi->s_err_report) ->&lruvec->lru_lock ->key#14 ->lock#2 ->stock_lock ->&x->wait#41 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->batched_entropy_u32.lock ->fs_reclaim ->kthread_create_lock ->&ei->i_prealloc_lock ->&sb->map[i].swap_lock ->&n->list_lock ->key#34 ->kfence_freelist_lock ->dq_list_lock ->&folio_wait_table[i] ->dq_data_lock ->dq_state_lock ->fs/quota/dquot.c:273 ->key ->&x->wait#10 ->&keyring->lock ->&type->lock_class ->krc.lock ->&meta->lock FD: 191 BD: 16 ++++: &sbi->s_writepages_rwsem ->&rsp->gp_wait ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&c->lock ->remove_cache_srcu ->pool_lock#2 ->lock#4 ->lock#5 ->&journal->j_state_lock ->jbd2_handle ->&xa->xa_lock#9 ->tk_core.seq.seqcount ->&base->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq_wait->wait ->&__ctx->lock ->rcu_node_0 ->&n->list_lock ->&meta->lock ->&ei->i_es_lock ->&ei->i_data_sem ->(console_sem).lock ->console_owner_lock ->console_owner ->&rcu_state.expedited_wq ->key#3 ->key#14 ->&mapping->i_private_lock ->stock_lock ->&sb->map[i].swap_lock FD: 14 BD: 5 +.-.: (&net->can.stattimer) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 17 ....: &sem->waiters FD: 1 BD: 185 ....: key#3 FD: 1 BD: 180 ....: key#4 FD: 14 BD: 185 +.+.: &sbi->s_error_lock ->&obj_hash[i].lock ->&base->lock FD: 28 BD: 6 +.+.: &eli->li_list_mtx 
->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 FD: 186 BD: 179 ++++: jbd2_handle ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&ret->b_state_lock ->&journal->j_revoke_lock ->&ei->i_raw_lock ->&journal->j_wait_updates ->&mapping->i_private_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->&ei->xattr_sem ->&obj_hash[i].lock ->&ei->i_es_lock ->&sb->s_type->i_lock_key#23 ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&journal->j_state_lock ->lock#4 ->lock#5 ->&ei->i_data_sem ->&xa->xa_lock#9 ->&journal->j_list_lock ->&sbi->s_orphan_lock ->&n->list_lock ->bit_wait_table + i ->key#3 ->key#4 ->&sbi->s_error_lock ->remove_cache_srcu ->&____s->seqcount#2 ->key ->pcpu_lock ->percpu_counters_lock ->&journal->j_wait_reserved ->stock_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 84 BD: 186 +.+.: &ret->b_state_lock ->&journal->j_list_lock FD: 83 BD: 189 +.+.: &journal->j_list_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&wb->work_lock ->&obj_hash[i].lock ->pool_lock#2 ->key#24 FD: 1 BD: 185 +.+.: &journal->j_revoke_lock FD: 1 BD: 190 +.+.: &ei->i_raw_lock FD: 29 BD: 186 ....: &journal->j_wait_updates ->&p->pi_lock FD: 35 BD: 967 -.-.: &wb->work_lock ->&obj_hash[i].lock ->&base->lock FD: 103 BD: 183 ++++: &meta_group_info[i]->alloc_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&c->lock ->&____s->seqcount ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&x->wait#25 ->&__ctx->lock ->&rq->__lock ->(&timer.timer) ->&fq->mq_flush_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&ret->b_state_lock FD: 190 BD: 14 .+.+: sb_internal ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&ei->i_raw_lock ->&ei->i_data_sem ->tk_core.seq.seqcount ->&xa->xa_lock#9 ->&ei->i_es_lock ->&mapping->i_private_lock ->&ei->i_prealloc_lock ->dquot_srcu ->dq_data_lock ->dq_list_lock ->lock#4 ->&cache->c_list_lock ->&mk->mk_decrypted_inodes_lock ->&base->lock ->&rq->__lock ->&x->wait#25 ->&__ctx->lock ->(&timer.timer) ->&sb->s_type->i_lock_key#23 ->&ei->xattr_sem ->rcu_node_0 ->bit_wait_table + i ->&sb->map[i].swap_lock ->&wb->list_lock ->&sb->s_type->i_lock_key#3 ->&wb->work_lock ->&sbi->s_orphan_lock FD: 2 BD: 186 ++++: &ei->i_prealloc_lock ->&pa->pa_lock#2 FD: 30 BD: 1 .+.+: file_rwsem ->&ctx->flc_lock ->&rq->__lock FD: 2 BD: 2 +.+.: &ctx->flc_lock ->&fll->lock FD: 1 BD: 3 +.+.: &fll->lock FD: 260 BD: 11 +.+.: &type->i_mutex_dir_key#3/1 ->rename_lock.seqcount ->&dentry->d_lock ->&rq->__lock ->&sem->wait_lock ->fs_reclaim ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&mapping->i_private_lock ->&c->lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&obj_hash[i].lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#23 ->&ei->xattr_sem ->tomoyo_ss ->&sb->s_type->i_mutex_key#8 ->&s->s_inode_list_lock ->jbd2_handle ->&____s->seqcount#2 ->rcu_node_0 ->stock_lock ->&meta_group_info[i]->alloc_sem ->batched_entropy_u32.lock ->&ei->i_raw_lock ->&n->list_lock ->&type->i_mutex_dir_key#3 ->&fsnotify_mark_srcu ->sb_internal ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->crngs.lock ->fscrypt_init_mutex ->crypto_alg_sem ->fscrypt_add_key_mutex ->&mk->mk_sem ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&wb->work_lock ->dquot_srcu ->&type->i_mutex_dir_key#3/5 ->&sb->s_type->i_mutex_key#8/4 ->rename_lock 
->krc.lock ->&sb->s_type->i_lock_key#37 ->smack_known_lock ->(console_sem).lock ->&sbi->s_error_lock ->key#3 ->key#4 ->&fq->mq_flush_lock FD: 120 BD: 1 +.+.: &type->s_umount_key#33/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#26 ->&sb->s_type->i_mutex_key#11 ->&dentry->d_lock FD: 43 BD: 7 +.+.: &sb->s_type->i_lock_key#26 ->&dentry->d_lock FD: 110 BD: 2 +.+.: &sb->s_type->i_mutex_key#11 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#26 FD: 45 BD: 1 +.+.: &type->s_umount_key#34 ->sb_lock ->&dentry->d_lock FD: 45 BD: 1 +.+.: &type->s_umount_key#35 ->sb_lock ->&dentry->d_lock FD: 45 BD: 1 +.+.: &type->s_umount_key#36 ->sb_lock ->&dentry->d_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#37/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 45 BD: 1 +.+.: &type->s_umount_key#38 ->sb_lock ->&dentry->d_lock FD: 1 BD: 4 +.+.: redirect_lock FD: 529 BD: 1 +.+.: &tty->atomic_write_lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&tty->termios_rwsem ->&tty->files_lock FD: 494 BD: 28 +.+.: &ldata->output_lock ->&port_lock_key ->&rq->__lock ->(console_sem).lock ->console_lock FD: 122 BD: 1 +.+.: &type->s_umount_key#39/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#28 ->fuse_mutex ->&dentry->d_lock FD: 43 BD: 366 +.+.: &sb->s_type->i_lock_key#28 ->&dentry->d_lock FD: 112 BD: 5 +.+.: fuse_mutex ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#28 ->rcu_node_0 ->rename_lock.seqcount ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&____s->seqcount#2 ->&____s->seqcount FD: 125 BD: 1 +.+.: &type->s_umount_key#40/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&c->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#29 ->pstore_sb_lock ->&sb->s_type->i_mutex_key#12 ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#29 ->&dentry->d_lock FD: 82 BD: 4 +.+.: &sb->s_type->i_mutex_key#12 ->fs_reclaim ->&zone->lock ->&____s->seqcount ->&psinfo->read_mutex ->&obj_hash[i].lock FD: 81 BD: 5 +.+.: &psinfo->read_mutex ->(efivars_lock).lock ->fs_reclaim ->pool_lock#2 ->(efi_runtime_lock).lock ->&obj_hash[i].lock ->&x->wait#12 ->&rq->__lock FD: 123 BD: 1 +.+.: &type->s_umount_key#41/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#30 ->bpf_preload_lock ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#30 ->&dentry->d_lock FD: 81 BD: 2 +.+.: bpf_preload_lock ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->key ->pcpu_lock ->percpu_counters_lock 
->&____s->seqcount ->running_helpers_waitq.lock FD: 15 BD: 1 +.-.: (&cb->timer) ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock FD: 2 BD: 1 ++++: uts_sem ->hostname_poll.wait.lock FD: 151 BD: 20 ++++: &type->i_mutex_dir_key#5 ->fs_reclaim ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->&sbinfo->stat_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&xattrs->lock ->&obj_hash[i].lock ->&simple_offset_lock_class ->&sb->s_type->i_lock_key ->&c->lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->&sem->wait_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->key ->pcpu_lock ->percpu_counters_lock ->quarantine_lock ->namespace_sem ->&____s->seqcount#2 ->stock_lock ->&rcu_state.expedited_wq ->&p->pi_lock ->&rcu_state.gp_wq FD: 455 BD: 16 .+.+: sb_writers#5 ->mount_lock ->&type->i_mutex_dir_key#5 ->&type->i_mutex_dir_key#5/1 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key ->&wb->list_lock ->&sb->s_type->i_mutex_key#13 ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->&s->s_inode_list_lock ->&info->lock ->&obj_hash[i].lock ->pool_lock#2 ->&sbinfo->stat_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->tomoyo_ss ->&xattrs->lock ->fs_reclaim ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&dentry->d_lock ->stock_lock ->&____s->seqcount ->&cfs_rq->removed.lock ->key#9 ->&c->lock ->inode_hash_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 ->batched_entropy_u32.lock ->lock#10 ->&type->s_vfs_rename_key#2 ->quarantine_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock FD: 164 BD: 18 +.+.: &type->i_mutex_dir_key#5/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&xattrs->lock ->&obj_hash[i].lock ->&simple_offset_lock_class ->&sb->s_type->i_lock_key ->&c->lock ->&____s->seqcount ->&u->bindlock ->pool_lock#2 ->&sb->s_type->i_mutex_key#13/4 ->&n->list_lock ->&sem->wait_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.gp_wq ->&sb->s_type->i_mutex_key#13 ->&fsnotify_mark_srcu ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock ->&xa->xa_lock#9 ->remove_cache_srcu ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&xa->xa_lock#5 ->stock_lock ->&____s->seqcount#2 ->&type->i_mutex_dir_key#5 ->&cfs_rq->removed.lock ->&type->i_mutex_dir_key#5/5 ->rename_lock ->krc.lock ->&sb->s_type->i_lock_key#37 ->&wb->list_lock ->inode_hash_lock ->quarantine_lock ->&rcu_state.expedited_wq FD: 21 BD: 10 +.+.: &f->f_lock ->fasync_lock FD: 1 BD: 2 ....: hostname_poll.wait.lock FD: 1 BD: 190 ....: key#5 FD: 1 BD: 190 ....: key#6 FD: 1 BD: 190 ....: key#7 FD: 1354 BD: 1 +.+.: &f->f_pos_lock ->&type->i_mutex_dir_key#3 ->&mm->mmap_lock ->&type->i_mutex_dir_key#4 ->sb_writers#5 ->&type->i_mutex_dir_key#5 ->sb_writers#13 ->&type->i_mutex_dir_key#9 ->sb_writers#15 ->fs_reclaim ->remove_cache_srcu ->pool_lock#2 ->&fiq->lock ->&req->waitq ->&rq->__lock ->&obj_hash[i].lock ->&fi->lock ->&mapping->i_mmap_rwsem ->&c->lock ->sb_writers#16 ->&sb->s_type->i_mutex_key#25 ->&sb->s_type->i_mutex_key#17 ->sb_writers#3 ->&type->i_mutex_dir_key#12 ->&sem->wait_lock ->&p->pi_lock ->sb_writers#9 ->&p->lock ->sb_writers#11 ->&ovl_i_mutex_dir_key[depth]#2 ->&type->i_mutex_dir_key#15 ->&sb->s_type->i_mutex_key#27 ->sb_writers#14 ->sb_writers#19 ->sb_writers#24 ->sb_writers#7 ->sb_writers#4 ->&sb->s_type->i_mutex_key#9 
FD: 274 BD: 151 +.+.: &mm->mmap_lock/1 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&vma->vm_lock->lock ->&mapping->i_mmap_rwsem ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&mm->page_table_lock ->ptlock_ptr(ptdesc)#2 ->&n->list_lock ->rcu_node_0 ->&rq->__lock ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rcu_state.gp_wq ->stock_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->key ->pcpu_lock ->percpu_counters_lock FD: 441 BD: 19 +.+.: &sb->s_type->i_mutex_key#13 ->&xattrs->lock ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key ->&info->lock ->lock#4 ->&wb->list_lock ->key#9 ->&rq->__lock ->&sb->s_type->i_mutex_key#13/4 ->&simple_offset_lock_class ->&dentry->d_lock ->&mapping->i_mmap_rwsem ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->tomoyo_ss ->rcu_node_0 ->&rcu_state.expedited_wq ->stock_lock ->pool_lock#2 ->&sem->wait_lock ->&p->pi_lock ->mount_lock ->&rcu_state.gp_wq ->namespace_sem ->mmu_notifier_invalidate_range_start ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&sbinfo->shrinklist_lock ->&mm->mmap_lock FD: 1 BD: 2 ....: key#8 FD: 93 BD: 21 +.+.: &u->bindlock ->&net->unx.table.locks[i] ->&bsd_socket_locks[i] ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock FD: 1 BD: 25 +.+.: &bsd_socket_locks[i] FD: 447 BD: 3 +.+.: &u->iolock ->rlock-AF_UNIX ->&u->peer_wait ->&rq->__lock ->&mm->mmap_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&u->lock ->&dir->lock ->tk_core.seq.seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->percpu_counters_lock ->&rq->__lock/1 ->unix_gc_lock ->&newf->file_lock ->stock_lock ->&cfs_rq->removed.lock FD: 31 BD: 310 ..-.: &ei->socket.wq.wait ->&p->pi_lock ->&ep->lock ->pool_lock#2 FD: 442 BD: 4 +.+.: &pipe->mutex ->&pipe->rd_wait ->&pipe->wr_wait ->fs_reclaim ->&____s->seqcount ->&mm->mmap_lock ->&rq->__lock ->&lock->wait_lock ->&obj_hash[i].lock ->stock_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 39 BD: 7 ....: &pipe->rd_wait ->&p->pi_lock ->&ep->lock ->p9_poll_lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 5 ....: &pipe->wr_wait ->&p->pi_lock FD: 1 BD: 22 ....: key#9 FD: 48 BD: 1 .+.+: sb_writers#6 ->tk_core.seq.seqcount ->mount_lock ->&rq->__lock FD: 35 BD: 1 ..-.: &(&tbl->managed_work)->timer FD: 448 BD: 1 +.+.: sk_lock-AF_NETLINK ->slock-AF_NETLINK ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->patch_lock ->&fp->aux->used_maps_mutex ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq FD: 1 BD: 2 +...: slock-AF_NETLINK FD: 1 BD: 10 +.+.: &sk->sk_peer_lock FD: 1 BD: 1 ....: &rs->lock#2 FD: 52 BD: 3 +.+.: oom_adj_mutex ->&p->alloc_lock FD: 116 BD: 1 +.+.: &group->mark_mutex ->&fsnotify_mark_srcu ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->lock ->ucounts_lock ->&mark->lock ->&conn->lock ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key ->remove_cache_srcu ->&____s->seqcount#2 ->&rq->__lock FD: 9 BD: 399 +.+.: &group->inotify_data.idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 3 BD: 3 +.+.: &mark->lock ->&fsnotify_mark_srcu ->&conn->lock FD: 1 BD: 8 +.+.: &conn->lock FD: 1197 BD: 2 +.+.: &ep->mtx ->fs_reclaim 
->&____s->seqcount ->&c->lock ->pool_lock#2 ->&f->f_lock ->&ei->socket.wq.wait ->&ep->lock ->&group->notification_waitq ->&group->notification_lock ->&sighand->signalfd_wqh ->&sighand->siglock ->&mm->mmap_lock ->&rq->__lock ->&pipe->rd_wait ->key#10 ->&obj_hash[i].lock ->krc.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&tty->ldisc_sem FD: 1198 BD: 1 +.+.: epnested_mutex ->&ep->mtx FD: 30 BD: 592 ....: &ep->lock ->&ep->wq FD: 31 BD: 7 ....: &group->notification_waitq ->&p->pi_lock ->&ep->lock FD: 1 BD: 7 +.+.: &group->notification_lock FD: 31 BD: 221 ....: &sighand->signalfd_wqh ->&p->pi_lock ->&ep->lock FD: 1125 BD: 2 .+.+: sb_writers#7 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#25 ->&wb->list_lock ->&type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->iattr_mutex ->&sb->s_type->i_mutex_key#14 ->&rq->__lock ->&xattrs->lock FD: 7 BD: 17 +.+.: swap_lock ->&p->lock#2 FD: 85 BD: 1 .+.+: kn->active ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&k->list_lock ->&c->lock ->uevent_sock_mutex ->&obj_hash[i].lock ->remove_cache_srcu ->&____s->seqcount ->quarantine_lock ->&rq->__lock ->&n->list_lock FD: 74 BD: 56 +.+.: &kernfs_locks->open_file_mutex[count] ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu ->&rq->__lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1122 BD: 6 +.+.: &of->mutex ->&rq->__lock ->cgroup_mutex ->&p->pi_lock ->cgroup_mutex.wait_lock FD: 1 BD: 286 ..-.: rlock-AF_NETLINK FD: 29 BD: 593 ....: &ep->wq ->&p->pi_lock FD: 85 BD: 1 .+.+: kn->active#2 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rq->__lock FD: 1 BD: 8 ....: &nlk->wait FD: 19 BD: 1 +.-.: (&vblank->disable_timer) ->&dev->vbl_lock FD: 32 BD: 2 +.+.: (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock FD: 35 BD: 1 +.-.: (&q->timeout) FD: 46 BD: 1 +.+.: (wq_completion)kblockd ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&hctx->run_work)->work) ->(work_completion)(&(&q->requeue_work)->work) ->&rq->__lock FD: 15 BD: 2 +.+.: (work_completion)(&q->timeout_work) ->&tags->lock ->&obj_hash[i].lock ->&base->lock FD: 84 BD: 1 .+.+: kn->active#3 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu ->quarantine_lock FD: 169 BD: 1 .+.+: kn->active#4 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&device->physical_node_lock ->remove_cache_srcu ->udc_lock ->fw_lock ->quarantine_lock ->&rfkill->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->rcu_node_0 ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&rcu_state.gp_wq FD: 35 BD: 1 ..-.: &(&ovs_net->masks_rebalance)->timer FD: 320 BD: 2 +.+.: (work_completion)(&(&ovs_net->masks_rebalance)->work) ->ovs_mutex 
->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 83 BD: 1 .+.+: kn->active#5 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->param_lock ->&c->lock FD: 219 BD: 3 +.+.: &of->mutex#2 ->&rq->__lock FD: 80 BD: 3 +.+.: &sb->s_type->i_mutex_key#14 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock FD: 1 BD: 70 +.+.: disk_events_mutex FD: 117 BD: 20 +.+.: &sb->s_type->i_mutex_key#13/4 ->&simple_offset_lock_class ->tk_core.seq.seqcount ->rename_lock ->&dentry->d_lock ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->stock_lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&xattrs->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key ->rcu_node_0 ->&n->list_lock FD: 33 BD: 750 +.+.: &dentry->d_lock/2 ->&dentry->d_lock/3 FD: 32 BD: 751 +.+.: &dentry->d_lock/3 ->&____s->seqcount#6 ->&wq FD: 1 BD: 706 +.+.: &____s->seqcount#6/1 FD: 80 BD: 1 .+.+: kn->active#6 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#7 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 71 BD: 1 .+.+: kn->active#8 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->remove_cache_srcu ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 80 BD: 1 .+.+: kn->active#9 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#10 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 71 BD: 1 .+.+: kn->active#11 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock ->remove_cache_srcu FD: 80 BD: 1 .+.+: kn->active#12 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#13 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#14 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#15 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 80 BD: 1 .+.+: kn->active#16 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#17 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#18 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock ->rcu_node_0 ->&____s->seqcount#2 FD: 80 BD: 1 .+.+: kn->active#19 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 FD: 80 BD: 1 .+.+: kn->active#20 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#21 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 218 BD: 1 .+.+: kn->active#22 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_addr_sem ->&____s->seqcount FD: 216 BD: 69 ++++: dev_addr_sem ->net_rwsem ->&tn->lock ->&sdata->sec_mtx ->fs_reclaim ->pool_lock#2 ->netdev_rename_lock.seqcount ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&tbl->lock ->&pn->hash_lock ->&obj_hash[i].lock ->input_pool.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->&br->lock ->quarantine_lock ->remove_cache_srcu ->&dev_addr_list_lock_key#8 ->&hard_iface->bat_iv.ogm_buff_mutex ->rcu_node_0 ->&rcu_state.gp_wq ->&____s->seqcount#2 
->&rcu_state.expedited_wq ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->team->team_lock_key#17 ->team->team_lock_key#20 ->team->team_lock_key#21 ->team->team_lock_key#22 FD: 80 BD: 1 .+.+: kn->active#23 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 84 BD: 1 .+.+: kn->active#24 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&dev->power.lock ->pci_lock FD: 80 BD: 1 .+.+: kn->active#25 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#26 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#27 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#28 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#15 FD: 63 BD: 16 .+.+: mapping.invalidate_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->bit_wait_table + i ->&ret->b_uptodate_lock ->&folio_wait_table[i] ->&sb->map[i].swap_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock FD: 80 BD: 1 .+.+: kn->active#29 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#30 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock ->&rq->__lock FD: 80 BD: 1 .+.+: kn->active#31 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#32 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 79 BD: 71 +.-.: slock-AF_INET/1 ->tk_core.seq.seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&____s->seqcount#2 ->hrtimer_bases.lock ->&n->list_lock ->&sctp_ep_hashtable[i].lock ->clock-AF_INET ->batched_entropy_u8.lock ->init_task.mems_allowed_seq.seqcount ->&f->f_owner.lock ->elock-AF_INET ->quarantine_lock ->krc.lock ->&sctp_port_hashtable[i].lock FD: 142 BD: 67 +.+.: devnet_rename_sem ->netdev_rename_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->&root->kernfs_rwsem ->kernfs_rename_lock ->&c->lock ->&____s->seqcount ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&obj_hash[i].lock ->&n->list_lock ->remove_cache_srcu ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sem->wait_lock ->&p->pi_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->percpu_counters_lock FD: 2 BD: 68 +...: netdev_rename_lock ->netdev_rename_lock.seqcount FD: 1 BD: 315 ....: kernfs_rename_lock FD: 80 BD: 1 .+.+: kn->active#33 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#34 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 3 +.+.: &evdev->client_lock FD: 32 BD: 3 +.+.: &evdev->mutex ->&dev->mutex#2 FD: 1 BD: 72 ....: target_list_lock FD: 640 BD: 9 +.+.: sk_lock-AF_INET ->slock-AF_INET#2 ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&icsk->icsk_accept_queue.rskq_lock ->clock-AF_INET ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&mm->mmap_lock ->tk_core.seq.seqcount ->&sd->defer_lock ->bh_lock 
->mmu_notifier_invalidate_range_start ->&rq->__lock ->&n->list_lock ->once_mutex ->batched_entropy_u32.lock ->batched_entropy_u16.lock ->&ei->socket.wq.wait ->rcu_node_0 ->quarantine_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->stock_lock ->&hashinfo->ehash_locks[i] ->free_vmap_area_lock ->&vn->busy.lock ->pcpu_alloc_mutex ->pack_mutex ->patch_lock ->&fp->aux->used_maps_mutex ->hrtimer_bases.lock ->&f->f_owner.lock ->&rcu_state.gp_wq ->&msk->pm.lock ->&dir->lock ->k-slock-AF_INET/1 ->k-sk_lock-AF_INET/1 ->k-slock-AF_INET#2 ->k-clock-AF_INET ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->crngs.lock ->&token_hash[i].lock ->k-sk_lock-AF_INET ->&vn->pool_lock ->&rcu_state.expedited_wq ->cpool_mutex ->cpu_hotplug_lock ->&sem->wait_lock ->&p->pi_lock ->&sighand->siglock ->&sctp_port_hashtable[i].lock ->lock ->&asoc->wait ->sctp_assocs_id_lock FD: 75 BD: 79 +.-.: slock-AF_INET#2 ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->(&req->rsk_timer) ->&base->lock ->&icsk->icsk_accept_queue.rskq_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u32.lock ->&sk->sk_lock.wq ->hrtimer_bases.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->krc.lock ->elock-AF_INET FD: 1 BD: 97 ++..: clock-AF_INET FD: 80 BD: 1 .+.+: kn->active#35 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#36 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#37 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&rq->__lock FD: 1 BD: 3 ....: key#10 FD: 1 BD: 5 ....: &vdev->fh_lock FD: 83 BD: 2 +.+.: &dev_instance->mutex ->fs_reclaim ->pool_lock#2 ->vicodec_core:1852:(hdl)->_lock ->&c->lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock ->&obj_hash[i].lock ->&____s->seqcount ->&n->list_lock FD: 4 BD: 3 +.+.: vicodec_core:1852:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 88 BD: 1 +.+.: &mdev->req_queue_mutex ->&dev_instance->mutex ->&vdev->fh_lock ->&mdev->graph_mutex ->vicodec_core:1852:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&dev->dev_mutex ->&dev->mutex#3 FD: 1 BD: 4 ....: &m2m_dev->job_spinlock FD: 1 BD: 6 ....: &q->done_wq FD: 1 BD: 6 +.+.: &q->mmap_lock FD: 83 BD: 2 +.+.: &dev->dev_mutex ->fs_reclaim ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 3 +.+.: vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 80 BD: 1 .+.+: kn->active#38 ->fs_reclaim ->&c->lock ->&____s->seqcount ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: fh->state->lock FD: 3 BD: 1 +.+.: &vcapture->lock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 2 +.+.: &dev->mutex#3 ->&vdev->fh_lock ->&q->done_wq ->&q->mmap_lock FD: 176 BD: 30 +.+.: &lo->lo_mutex ->fs_reclaim ->pool_lock#2 ->wq_pool_mutex ->&bdev->bd_holder_lock ->&q->limits_lock ->lock ->&root->kernfs_rwsem ->&bdev->bd_size_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&lock->wait_lock FD: 325 BD: 30 +.+.: &nbd->config_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&bdev->bd_size_lock ->&q->queue_lock ->&ACCESS_PRIVATE(sdp, lock) ->set->srcu ->&obj_hash[i].lock 
->&rq->__lock ->&x->wait#9 ->&c->lock ->&____s->seqcount ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq ->fs_reclaim ->&set->tag_list_lock ->lock ->&root->kernfs_rwsem ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->cpu_hotplug_lock ->&q->limits_lock ->uevent_sock_mutex ->&bdev->bd_holder_lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->mount_lock FD: 2 BD: 29 +.+.: &new->lock ->&mtdblk->cache_mutex FD: 1 BD: 30 +.+.: &mtdblk->cache_mutex FD: 80 BD: 1 .+.+: kn->active#39 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 439 BD: 1 +.+.: &mtd->master.chrdev_lock ->&mm->mmap_lock FD: 29 BD: 254 -.-.: &x->wait#25 ->&p->pi_lock FD: 1 BD: 215 +.+.: &__ctx->lock FD: 40 BD: 2 +.+.: (work_completion)(&(&hctx->run_work)->work) ->rcu_node_0 FD: 53 BD: 247 -.-.: &fq->mq_flush_lock ->&q->requeue_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&x->wait#25 ->bit_wait_table + i ->pool_lock#2 ->&x->wait#33 ->&folio_wait_table[i] ->&xa->xa_lock#9 ->&sbi->cp_wait ->quarantine_lock ->&base->lock FD: 1 BD: 250 -.-.: &q->requeue_lock FD: 41 BD: 2 +.+.: (work_completion)(&(&q->requeue_work)->work) ->&q->requeue_lock ->&hctx->lock ->&__ctx->lock ->rcu_node_0 ->&rq->__lock FD: 35 BD: 1 ..-.: &(&wb->dwork)->timer FD: 400 BD: 1 +.+.: (wq_completion)writeback ->(work_completion)(&(&wb->dwork)->work) ->(work_completion)(&(&wb->bw_dwork)->work) ->(work_completion)(&barr->work) FD: 397 BD: 2 +.+.: (work_completion)(&(&wb->dwork)->work) ->&wb->work_lock ->&wb->list_lock ->&p->sequence ->key#11 ->&sb->s_type->i_lock_key#23 ->&sbi->s_writepages_rwsem ->&rq->__lock ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->kfence_freelist_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->lock#4 ->lock#5 ->&pl->lock ->&sb->s_type->i_lock_key#34 ->fs_reclaim ->&tree->tree_lock ->&tree->hash_lock ->&node->lock_wq ->&mapping->i_private_lock ->&bdi->wb_waitq ->&sb->s_type->i_lock_key#35 ->&ip->i_flags_lock ->&ifs->state_lock ->&xfs_nondir_ilock_class#3 ->sb_internal#2 ->&____s->seqcount#2 ->&sb->s_type->i_lock_key#36 ->&sci->sc_state_lock ->&sci->sc_wait_daemon ->&sci->sc_wait_request ->&nilfs->ns_segctor_sem ->&sb->s_type->i_lock_key#38 ->btrfs-tree-01 ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->&wq->list_lock ->&eb->refs_lock ->btrfs-tree-00 ->btrfs-csum-00 ->&tree->lock ->&tree->lock#2 ->&ei->ordered_tree_lock ->&sb->s_type->i_lock_key#41 ->&s->s_inode_list_lock ->&nm_i->build_lock ->key#32 ->&sbi->inode_lock[i] ->inode_hash_lock ->&nm_i->nat_tree_lock ->&sbi->node_write ->&io->io_rwsem ->&__ctx->lock ->&fs_info->delayed_iput_lock ->&p->pi_lock ->btrfs-quota-00 ->&folio_wait_table[i] ->btrfs-extent-00 ->btrfs-free-space-00 ->btrfs-extent-01#2 ->&meta->lock ->&sb->s_type->i_lock_key#45 ->rcu_node_0 ->&n->list_lock ->&sb->s_type->i_lock_key#50 ->&(log)->gclock ->&et->lock ->&sb->s_type->i_lock_key#49 ->&hip->extents_lock ->&tree->tree_lock#2 ->&HFSPLUS_I(inode)->extents_lock ->&tree->hash_lock#2 ->&node->lock_wq#2 ->(console_sem).lock ->remove_cache_srcu FD: 2 BD: 4 +.-.: &p->sequence ->key#13 FD: 1 BD: 969 -.-.: key#11 FD: 29 BD: 1 +.-.: (&journal->j_commit_timer) ->&p->pi_lock FD: 98 BD: 1 +.+.: &journal->j_checkpoint_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&journal->j_state_lock FD: 29 BD: 186 ....: &journal->j_wait_transaction_locked ->&p->pi_lock FD: 1 BD: 185 +.+.: &sbi->s_md_lock 
FD: 1 BD: 1 ....: &journal->j_fc_wait FD: 1 BD: 1 +.+.: &journal->j_history_lock FD: 80 BD: 1 .+.+: kn->active#40 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 4 +.+.: destroy_lock FD: 35 BD: 1 ..-.: fs/notify/mark.c:89 FD: 86 BD: 2 +.+.: connector_reaper_work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#9 ->pool_lock#2 ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock FD: 85 BD: 2 +.+.: (reaper_work).work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&x->wait#9 ->&rq->__lock ->pool_lock#2 ->stock_lock ->&cfs_rq->removed.lock ->&base->lock FD: 1088 BD: 1 +.+.: nlk_cb_mutex-ROUTE ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->rlock-AF_NETLINK ->&n->list_lock ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq FD: 13 BD: 68 +...: fib_info_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 89 BD: 68 +...: &net->sctp.local_addr_lock ->&net->sctp.addr_wq_lock FD: 88 BD: 70 +.-.: &net->sctp.addr_wq_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->slock-AF_INET/1 ->slock-AF_INET6/1 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->k-slock-AF_INET6/1 ->quarantine_lock ->&____s->seqcount#2 FD: 80 BD: 2 +.+.: (work_completion)(&ht->run_work) ->&ht->mutex ->&rq->__lock FD: 79 BD: 82 +.+.: &ht->mutex ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->rhashtable_bucket ->&ht->lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->rcu_node_0 ->&____s->seqcount#2 ->remove_cache_srcu ->&rcu_state.expedited_wq ->&base->lock ->quarantine_lock FD: 1 BD: 265 ....: rhashtable_bucket/1 FD: 4 BD: 85 +.+.: &ht->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 +...: clock-AF_NETLINK FD: 1 BD: 67 +...: &dev_addr_list_lock_key#4 FD: 28 BD: 76 .+.+: netpoll_srcu ->&rq->__lock FD: 14 BD: 82 +.-.: &in_dev->mc_tomb_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&n->list_lock FD: 17 BD: 77 +.-.: &im->lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 1 BD: 76 +.+.: cbs_list_lock FD: 35 BD: 72 +...: &net->ipv6.addrconf_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 36 BD: 90 +...: &ifa->lock ->batched_entropy_u32.lock ->crngs.lock ->&obj_hash[i].lock ->&base->lock FD: 51 BD: 91 +.-.: &tb->tb6_lock ->&net->ipv6.fib6_walker_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->rt6_exception_lock ->&data->fib_event_queue_lock ->&n->list_lock ->quarantine_lock ->&____s->seqcount#2 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock FD: 1 BD: 92 ++..: &net->ipv6.fib6_walker_lock FD: 652 BD: 68 +.+.: sk_lock-AF_INET6 ->slock-AF_INET6 ->&table->hash[i].lock ->batched_entropy_u32.lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->fs_reclaim ->&mm->mmap_lock ->once_lock ->tk_core.seq.seqcount ->&zone->lock ->&n->list_lock ->clock-AF_INET6 ->&dccp_hashinfo.bhash[i].lock ->&ei->socket.wq.wait ->&icsk->icsk_accept_queue.rskq_lock#2 ->&vn->pool_lock 
->&vn->busy.lock ->stock_lock ->pcpu_alloc_mutex ->pack_mutex ->patch_lock ->&fp->aux->used_maps_mutex ->&sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->crngs.lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&asoc->wait ->lock ->mmu_notifier_invalidate_range_start ->&dir->lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->k-sk_lock-AF_INET6 ->sk_lock-AF_INET6/1 ->&msk->pm.lock ->krc.lock ->sctp_assocs_id_lock ->&list->lock#25 ->crypto_alg_sem ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock ->key ->pcpu_lock ->percpu_counters_lock ->(crypto_chain).rwsem ->&x->wait#20 ->(&timer.timer) ->&sctp_ep_hashtable[i].lock ->&rcu_state.gp_wq ->tcpv6_prot_mutex ->device_spinlock ->&sw_ctx_rx->wq ->&hashinfo->ehash_locks[i] ->&token_hash[i].lock ->&ndev->lock ->acaddr_hash_lock ->&tb->tb6_lock ->cpu_hotplug_lock FD: 52 BD: 81 +.-.: slock-AF_INET6 ->&obj_hash[i].lock ->pool_lock#2 ->&token_hash[i].lock ->crngs.lock ->&c->lock ->&n->list_lock ->tk_core.seq.seqcount ->krc.lock ->batched_entropy_u32.lock ->sctp_assocs_id_lock ->&base->lock ->&list->lock#25 ->quarantine_lock FD: 2 BD: 100 ++-.: clock-AF_INET6 ->pool_lock#2 FD: 1 BD: 1 +.+.: userns_state_mutex FD: 449 BD: 1 +.+.: sk_lock-AF_UNIX ->slock-AF_UNIX ->&mm->mmap_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&rq->__lock ->&vn->pool_lock ->&vn->busy.lock ->&____s->seqcount ->rcu_node_0 ->stock_lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->once_lock ->pack_mutex ->batched_entropy_u32.lock ->patch_lock ->&fp->aux->used_maps_mutex FD: 1 BD: 2 +...: slock-AF_UNIX FD: 80 BD: 1 .+.+: kn->active#41 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 1108 BD: 5 +.+.: nlk_cb_mutex-GENERIC ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->&rdev->wiphy.mtx ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&rq->__lock ->genl_mutex ->&devlink->lock_key#20 ->&devlink->lock_key#21 ->&devlink->lock_key#22 ->rcu_node_0 ->&devlink->lock_key#23 FD: 23 BD: 72 +.-.: &rdev->bss_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock FD: 89 BD: 1 +.-.: (&net->sctp.addr_wq_timer) ->&net->sctp.addr_wq_lock FD: 87 BD: 2 +.+.: (work_completion)(&aux->work) ->pack_mutex ->&obj_hash[i].lock ->pool_lock#2 ->pcpu_lock ->&vn->busy.lock ->&vn->lazy.lock ->stock_lock ->&rq->__lock ->rcu_node_0 ->map_idr_lock FD: 1 BD: 68 +.+.: napi_hash_lock FD: 14 BD: 67 ++..: lapb_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 67 ++.-: x25_neigh_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 67 +...: _xmit_SLIP FD: 15 BD: 1 +.-.: (&eql->timer) ->&eql->queue.lock ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 70 +.-.: &eql->queue.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 80 BD: 139 +.+.: xps_map_mutex ->fs_reclaim ->pool_lock#2 ->jump_label_mutex ->&rq->__lock FD: 34 BD: 1 +.-.: &tx->clean_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 79 +.+.: rtnl_mutex.wait_lock FD: 28 BD: 72 +.+.: &data->mutex ->&rq->__lock FD: 19 BD: 77 +...: &local->filter_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount ->&n->list_lock FD: 35 BD: 81 ..-.: &rdev->wiphy_work_lock FD: 451 BD: 2 +.+.: (work_completion)(&rdev->wiphy_work) ->&rdev->wiphy.mtx 
->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 67 +...: _xmit_VOID FD: 1 BD: 67 +...: _xmit_X25 FD: 4 BD: 68 +...: &lapbeth->up_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 51 BD: 68 +.-.: &lapb->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->init_task.mems_allowed_seq.seqcount ->&n->list_lock ->&____s->seqcount#2 ->&list->lock#27 FD: 1 BD: 149 +.-.: &ul->lock FD: 2 BD: 184 +.+.: &(ei->i_block_reservation_lock) ->key#14 FD: 82 BD: 2 +.+.: (work_completion)(&work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->quarantine_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock FD: 1088 BD: 2 +.+.: (work_completion)(&(&ifa->dad_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 40 BD: 92 +.-.: rt6_exception_lock ->&____s->seqcount ->once_lock ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&dir->lock#2 ->krc.lock FD: 20 BD: 11 +.+.: fasync_lock ->&new->fa_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 1 BD: 221 ....: &tty->ctrl.lock FD: 527 BD: 24 +.+.: &buf->lock ->&tty->termios_rwsem FD: 33 BD: 28 ....: &tty->flow.lock ->led_lock ->&tty->write_wait FD: 64 BD: 67 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->_xmit_ETHER#2 ->&sch->root_lock_key#3 ->_xmit_SLIP#2 ->_xmit_NETROM FD: 46 BD: 101 +.-.: _xmit_ETHER#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock FD: 528 BD: 13 +.+.: &ldata->atomic_read_lock ->&tty->termios_rwsem ->&rq->__lock ->&o_tty->termios_rwsem/1 ->&tty->read_wait FD: 1 BD: 73 ....: class FD: 1 BD: 73 ....: (&tbl->proxy_timer) FD: 88 BD: 5 +.+.: &net->packet.sklist_lock ->clock-AF_PACKET ->fanout_mutex ->&rq->__lock FD: 464 BD: 4 +.+.: sk_lock-AF_PACKET ->slock-AF_PACKET ->&po->bind_lock ->&rq->__lock ->&obj_hash[i].lock ->&x->wait#2 ->&mm->mmap_lock ->fs_reclaim ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->patch_lock ->&fp->aux->used_maps_mutex ->remove_cache_srcu ->&c->lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 ->&n->list_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 1 BD: 5 +...: slock-AF_PACKET FD: 37 BD: 71 +.+.: &po->bind_lock ->ptype_lock ->pool_lock#2 ->&dir->lock#2 ->&match->lock FD: 35 BD: 1 ..-.: &(&idev->mc_ifc_work)->timer FD: 173 BD: 1 +.+.: (wq_completion)mld ->(work_completion)(&(&idev->mc_ifc_work)->work) ->(work_completion)(&(&idev->mc_dad_work)->work) ->&rq->__lock FD: 171 BD: 2 +.+.: (work_completion)(&(&idev->mc_ifc_work)->work) ->&idev->mc_lock ->&rq->__lock FD: 13 BD: 87 +.-.: &ul->lock#2 ->pool_lock#2 ->&dir->lock#2 ->&c->lock FD: 18 BD: 135 ++--: &n->lock ->&____s->seqcount#8 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&(&n->ha_lock)->lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 137 +.--: &____s->seqcount#8 FD: 35 BD: 1 ..-.: &(&idev->mc_dad_work)->timer FD: 171 BD: 2 +.+.: (work_completion)(&(&idev->mc_dad_work)->work) ->&idev->mc_lock ->&rq->__lock FD: 4 BD: 163 +.-.: rlock-AF_PACKET ->tk_core.seq.seqcount FD: 1 BD: 1 +...: wlock-AF_PACKET FD: 35 BD: 1 ..-.: &(&ifa->dad_work)->timer FD: 86 BD: 6 +.+.: fanout_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&po->bind_lock FD: 1 BD: 6 ++..: clock-AF_PACKET 
FD: 35 BD: 1 ..-.: net/core/link_watch.c:31 FD: 79 BD: 68 +.-.: &dev->tx_global_lock ->_xmit_ETHER#2 ->&obj_hash[i].lock ->&base->lock ->&qdisc_xmit_lock_key ->&qdisc_xmit_lock_key#4 ->&qdisc_xmit_lock_key#5 ->_xmit_NETROM ->_xmit_NONE#2 ->&qdisc_xmit_lock_key#6 ->&qdisc_xmit_lock_key#7 ->&qdisc_xmit_lock_key#8 ->&qdisc_xmit_lock_key#2 ->&qdisc_xmit_lock_key#3 ->&qdisc_xmit_lock_key#9 ->&qdisc_xmit_lock_key#10 ->&qdisc_xmit_lock_key#11 ->&qdisc_xmit_lock_key#12 ->&qdisc_xmit_lock_key#13 ->&batadv_netdev_xmit_lock_key ->&qdisc_xmit_lock_key#14 ->&qdisc_xmit_lock_key#15 FD: 1 BD: 67 +...: &sch->root_lock_key FD: 1 BD: 135 +.-.: lock#8 FD: 1 BD: 137 ..-.: id_table_lock FD: 1 BD: 67 +...: &sch->root_lock_key#2 FD: 44 BD: 2 +.+.: (work_completion)(&w->work)#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->nf_conntrack_mutex ->nf_conntrack_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 FD: 1 BD: 140 ...-: &____s->seqcount#9 FD: 35 BD: 1 ..-.: drivers/base/dd.c:321 FD: 41 BD: 2 +.+.: (deferred_probe_timeout_work).work ->device_links_lock ->deferred_probe_mutex ->&x->wait#10 ->&rq->__lock ->&obj_hash[i].lock FD: 94 BD: 1 .+.+: &type->s_umount_key#42 ->&sb->s_type->i_lock_key#3 ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->lock#4 ->lock#5 ->&wb->list_lock ->&____s->seqcount#2 ->&rq->__lock ->&rq_wait->wait ->&__ctx->lock ->&sb->map[i].swap_lock ->rcu_node_0 FD: 1 BD: 966 -.-.: &s->s_inode_wblist_lock FD: 1 BD: 967 -.-.: key#12 FD: 100 BD: 184 +.+.: &lg->lg_mutex ->&ei->i_prealloc_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&c->lock ->pool_lock#2 ->lock#4 ->&mapping->i_private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&obj_hash[i].lock ->&____s->seqcount#2 FD: 1 BD: 185 +.+.: &pa->pa_lock FD: 1 BD: 185 +.+.: &lg->lg_prealloc_lock FD: 35 BD: 3 -.-.: &ei->i_completed_io_lock FD: 192 BD: 1 +.+.: (wq_completion)ext4-rsv-conversion ->(work_completion)(&ei->i_rsv_conversion_work) ->&rq->__lock FD: 191 BD: 2 +.+.: (work_completion)(&ei->i_rsv_conversion_work) ->&ei->i_completed_io_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->pool_lock#2 ->&ext4__ioend_wq[i] ->&ret->b_uptodate_lock ->&xa->xa_lock#9 ->&folio_wait_table[i] ->&rq->__lock ->quarantine_lock ->mmu_notifier_invalidate_range_start ->&meta->lock ->kfence_freelist_lock ->&ei->i_es_lock ->&ei->i_data_sem ->&ei->i_raw_lock FD: 1 BD: 186 ....: &journal->j_wait_reserved FD: 1 BD: 3 ....: &ext4__ioend_wq[i] FD: 35 BD: 1 ..-.: &(&wb->bw_dwork)->timer FD: 82 BD: 2 +.+.: (work_completion)(&(&wb->bw_dwork)->work) ->&wb->list_lock FD: 1 BD: 95 +.-.: &ct->lock FD: 52 BD: 1 +.-.: (&lapb->t1timer) ->&lapb->lock FD: 80 BD: 1 +.-.: (&dev->watchdog_timer) ->&dev->tx_global_lock FD: 16 BD: 1 +.-.: (&dom->period_timer) ->key#13 ->&p->sequence ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 968 -.-.: key#13 FD: 5 BD: 127 +.-.: &nf_conntrack_locks[i] ->&nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 4 BD: 128 +.-.: &nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 14 BD: 96 +.-.: &hashinfo->ehash_locks[i] ->&obj_hash[i].lock ->&base->lock FD: 2 BD: 136 +.-.: &(&n->ha_lock)->lock ->&____s->seqcount#8 FD: 1 BD: 80 ..-.: (&req->rsk_timer) FD: 1 BD: 80 +.-.: &icsk->icsk_accept_queue.rskq_lock FD: 1 BD: 10 +.-.: &sd->defer_lock FD: 89 BD: 5 +.-.: (&icsk->icsk_retransmit_timer) ->slock-AF_INET#2 
->k-slock-AF_INET#2 FD: 89 BD: 5 +.-.: (&icsk->icsk_delack_timer) ->slock-AF_INET#2 ->k-slock-AF_INET#2 FD: 19 BD: 75 +.-.: tcp_metrics_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 1 BD: 186 ....: key#14 FD: 28 BD: 180 +.+.: &sbi->s_orphan_lock ->&rq->__lock FD: 2 BD: 966 -.-.: &pl->lock ->key#12 FD: 35 BD: 1 ..-.: &(&tbl->gc_work)->timer FD: 48 BD: 2 +.+.: (work_completion)(&(&tbl->gc_work)->work) ->&tbl->lock ->&rq->__lock FD: 3 BD: 10 +.+.: once_mutex ->crngs.lock FD: 1 BD: 3 +.+.: module_mutex FD: 200 BD: 2 +.+.: (work_completion)(&sbi->s_sb_upd_work) ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->key#3 ->key#4 ->&sbi->s_error_lock ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->bit_wait_table + i ->&rq->__lock ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 134 BD: 1 +.+.: &type->s_umount_key#43/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&sb->s_type->i_lock_key#31 ->crngs.lock ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 43 BD: 407 +.+.: &sb->s_type->i_lock_key#31 ->&dentry->d_lock FD: 1122 BD: 1 .+.+: sb_writers#8 ->mount_lock ->&type->i_mutex_dir_key#6 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&c->lock ->&type->i_mutex_dir_key#6/1 ->&n->list_lock ->&rq->__lock FD: 96 BD: 2 ++++: &type->i_mutex_dir_key#6 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->stock_lock FD: 80 BD: 1 ++++: kn->active#42 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 FD: 134 BD: 1 +.+.: &type->s_umount_key#44/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#32 ->crngs.lock ->&root->kernfs_supers_rwsem ->&dentry->d_lock ->&c->lock FD: 43 BD: 503 +.+.: &sb->s_type->i_lock_key#32 ->&dentry->d_lock FD: 121 BD: 1 +.+.: &type->s_umount_key#45 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->percpu_ref_switch_lock ->&root->kernfs_supers_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&dentry->d_lock/1 FD: 1 BD: 1 ..-.: percpu_ref_switch_waitq.lock FD: 1098 BD: 2 +.+.: (work_completion)(&cgrp->bpf.release_work) ->cgroup_mutex ->cgroup_mutex.wait_lock ->&p->pi_lock ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 18 +.+.: cgroup_mutex.wait_lock FD: 1103 BD: 1 +.+.: (wq_completion)cgroup_destroy ->(work_completion)(&css->destroy_work) ->(work_completion)(&(&css->destroy_rwork)->work) FD: 1096 BD: 2 +.+.: (work_completion)(&css->destroy_work) ->cgroup_mutex ->cgroup_mutex.wait_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1101 BD: 2 +.+.: (work_completion)(&(&css->destroy_rwork)->work) ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cgrp->pidlist_mutex ->(wq_completion)cgroup_pidlist_destroy ->&wq->mutex ->cgroup_mutex ->pcpu_lock ->&root->kernfs_rwsem ->kernfs_idr_lock ->krc.lock ->cgroup_mutex.wait_lock ->&p->pi_lock FD: 82 BD: 13 +.+.: &cgrp->pidlist_mutex ->css_set_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock FD: 84 BD: 3 +.+.: (wq_completion)cgroup_pidlist_destroy ->(work_completion)(&(&l->destroy_dwork)->work) FD: 1122 
BD: 2 .+.+: sb_writers#9 ->mount_lock ->&type->i_mutex_dir_key#7 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#7/1 ->&sem->wait_lock ->&p->pi_lock ->pool_lock#2 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#32 ->&wb->list_lock ->remove_cache_srcu ->&c->lock ->&n->list_lock ->&rq->__lock FD: 107 BD: 3 ++++: &type->i_mutex_dir_key#7 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#32 ->&sem->wait_lock ->pool_lock#2 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rcu_state.expedited_wq FD: 1 BD: 18 +.+.: &dom->lock FD: 80 BD: 1 .+.+: kn->active#43 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 317 BD: 1 .+.+: kn->active#44 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 1 BD: 263 ....: &newf->resize_wait FD: 11 BD: 168 ..-.: &kcov->lock ->kcov_remote_lock FD: 440 BD: 1 .+.+: sb_writers#10 ->&mm->mmap_lock ->&attr->mutex FD: 439 BD: 2 +.+.: &attr->mutex ->&mm->mmap_lock FD: 1 BD: 67 +.+.: &wpan_dev->association_lock FD: 1 BD: 71 ++.-: &pn->hash_lock FD: 52 BD: 1 +...: &net->ipv6.fib6_gc_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 67 +...: _xmit_IEEE802154 FD: 1 BD: 5 +.+.: &xa->xa_lock#16 FD: 1 BD: 8 ....: genl_sk_destructing_waitq.lock FD: 4 BD: 73 +...: &rdev->beacon_registrations_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 35 BD: 74 +.-.: &rdev->mgmt_registrations_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 73 +...: &wdev->pmsr_lock FD: 1 BD: 68 +.+.: reg_indoor_lock FD: 45 BD: 1 +.+.: &type->s_umount_key#46 ->sb_lock ->&dentry->d_lock FD: 128 BD: 1 +.+.: &sb->s_type->i_mutex_key#16 ->namespace_sem FD: 177 BD: 2 +.+.: (work_completion)(&w->w) ->nfc_devlist_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 +.+.: &genl_data->genl_data_mutex FD: 78 BD: 15 +.+.: swap_cgroup_mutex ->fs_reclaim ->&____s->seqcount FD: 1 BD: 15 +.+.: &((cluster_info + ci)->lock)/1 FD: 53 BD: 15 +.+.: swapon_mutex ->swap_lock ->percpu_ref_switch_lock ->(console_sem).lock FD: 6 BD: 162 +.+.: &p->lock#2 ->swap_avail_lock ->&((cluster_info + ci)->lock)#2 ->batched_entropy_u32.lock FD: 1 BD: 158 +.+.: swap_avail_lock FD: 1 BD: 15 ....: proc_poll_wait.lock FD: 321 BD: 1 +.+.: swap_slots_cache_enable_mutex ->cpu_hotplug_lock ->swap_lock FD: 1 BD: 219 +.+.: swap_slots_cache_mutex FD: 1 BD: 73 +.-.: &sch->root_lock_key#3 FD: 1 BD: 72 +.-.: &r->producer_lock FD: 196 BD: 153 .+.+: sb_pagefaults ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->mapping.invalidate_lock ->&c->lock ->remove_cache_srcu ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->quarantine_lock FD: 207 BD: 1 +.+.: pid_caches_mutex ->slab_mutex FD: 45 BD: 1 +.+.: &type->s_umount_key#47 ->sb_lock ->&dentry->d_lock FD: 151 BD: 2 ++++: &sb->s_type->i_mutex_key#17 ->namespace_sem ->&dentry->d_lock ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock FD: 11 BD: 35 ++++: hci_sk_list.lock ->rlock-AF_BLUETOOTH ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 238 BD: 1 +.+.: &data->open_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#8 ->&____s->seqcount ->hci_index_ida.xa_lock ->wq_pool_mutex ->pin_fs_lock 
->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&n->list_lock ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->subsys mutex#74 ->&lock->wait_lock ->&rq->__lock ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem ->&list->lock#7 ->&data->read_wait ->remove_cache_srcu ->&____s->seqcount#2 FD: 1 BD: 15 ....: hci_index_ida.xa_lock FD: 1 BD: 212 +.+.: uevent_sock_mutex.wait_lock FD: 31 BD: 30 +.+.: subsys mutex#74 ->&rq->__lock ->&lock->wait_lock ->&k->k_lock FD: 14 BD: 26 ++++: hci_dev_list_lock ->pool_lock#2 ->tk_core.seq.seqcount ->rlock-AF_BLUETOOTH ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock FD: 2 BD: 2 ....: queue_list_lock ->&q->owner_lock FD: 554 BD: 19 +.+.: (work_completion)(&hdev->power_on) ->&hdev->req_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&hdev->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 553 BD: 21 +.+.: &hdev->req_lock ->&obj_hash[i].lock ->pool_lock#2 ->&list->lock#5 ->&list->lock#6 ->&hdev->req_wait_q ->&base->lock ->&rq->__lock ->&c->lock ->(&timer.timer) ->&____s->seqcount ->tk_core.seq.seqcount ->hci_sk_list.lock ->&cfs_rq->removed.lock ->hci_dev_list_lock ->&wq->mutex ->&hdev->lock ->&msft->filter_lock ->&list->lock#7 ->&n->list_lock ->rcu_node_0 ->&____s->seqcount#2 ->&meta->lock ->kfence_freelist_lock ->(console_sem).lock ->&buf->lock ->&port->buf.lock/1 ->&hu->proto_lock FD: 1 BD: 22 ....: &list->lock#5 FD: 1 BD: 31 ....: &list->lock#6 FD: 29 BD: 29 ....: &hdev->req_wait_q ->&p->pi_lock FD: 1 BD: 30 ....: &list->lock#7 FD: 29 BD: 9 ....: &data->read_wait ->&p->pi_lock FD: 1 BD: 68 +...: &r->consumer_lock#3 FD: 81 BD: 6 +.+.: (work_completion)(&hdev->cmd_work) ->&list->lock#6 ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#7 ->&obj_hash[i].lock ->&data->read_wait ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->rcu_node_0 ->hci_sk_list.lock ->&____s->seqcount#2 ->&hu->proto_lock FD: 1 BD: 3 +.+.: &fi->rdc.lock FD: 60 BD: 4 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->sock_cookie_ida.xa_lock ->&p->alloc_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->clock-AF_BLUETOOTH ->&c->lock ->&n->list_lock ->&rq->__lock ->rlock-AF_BLUETOOTH ->hci_dev_list_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 5 +...: slock-AF_BLUETOOTH-BTPROTO_HCI FD: 1 BD: 5 ....: sock_cookie_ida.xa_lock FD: 45 BD: 5 +.+.: tcp_exit_batch_mutex ->&rq->__lock ->&hashinfo->ehash_locks[i] ->(&tw->tw_timer) ->&obj_hash[i].lock ->&base->lock ->&tcp_hashinfo.bhash[i].lock FD: 1 BD: 67 +...: &net->xfrm.xfrm_state_lock FD: 1 BD: 67 +...: xfrm_state_dev_gc_lock FD: 196 BD: 6 +.+.: (work_completion)(&hdev->rx_work) ->&list->lock#6 ->lock#6 ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->&hdev->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&obj_hash[i].lock ->&hdev->req_wait_q ->&c->lock ->&base->lock ->chan_list_lock ->quarantine_lock 
->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->hci_sk_list.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->batched_entropy_u8.lock ->remove_cache_srcu FD: 193 BD: 28 +.+.: &hdev->lock ->&xa->xa_lock#17 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#8 ->&c->lock ->&n->list_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#74 ->&list->lock#6 ->&hdev->unregister_lock ->&____s->seqcount ->hci_cb_list_lock ->&base->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&hdev->cmd_sync_work_lock ->pool_lock ->&sem->wait_lock ->&p->pi_lock ->&x->wait#2 ->&rq->__lock ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->hci_cb_list_lock.wait_lock ->quarantine_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->uevent_sock_mutex.wait_lock FD: 1 BD: 1163 -.-.: &nna->lock FD: 10 BD: 29 ....: &xa->xa_lock#17 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 35 BD: 1 ..-.: &(&net->ipv6.addr_chk_work)->timer FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#44 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 1 BD: 1 +.+.: &futex_queues[i].lock FD: 1 BD: 6 +...: clock-AF_BLUETOOTH FD: 79 BD: 29 +.+.: &hdev->unregister_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&hdev->cmd_sync_work_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 28 BD: 30 +.+.: &hdev->cmd_sync_work_lock ->&rq->__lock FD: 1 BD: 30 +.+.: &conn->ident_lock FD: 1 BD: 31 ....: &list->lock#8 FD: 1 BD: 31 +.+.: &conn->chan_lock FD: 554 BD: 6 +.+.: (work_completion)(&hdev->cmd_sync_work) ->&hdev->cmd_sync_work_lock ->&hdev->req_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 33 BD: 6 +.+.: (work_completion)(&hdev->tx_work) ->&list->lock#8 ->tk_core.seq.seqcount ->&list->lock#7 ->&data->read_wait ->&list->lock#6 ->&rq->__lock FD: 2 BD: 6 +.+.: (work_completion)(&conn->pending_rx_work) ->&list->lock#9 FD: 1 BD: 7 ....: &list->lock#9 FD: 1 BD: 1 +.+.: &undo_list->lock FD: 1 BD: 67 +...: &nr_netdev_addr_lock_key FD: 1 BD: 67 +...: listen_lock FD: 66 BD: 68 +.-.: (&peer->timer_new_handshake) ->&peer->endpoint_lock FD: 1 BD: 5 +...: &tn->node_list_lock FD: 2 BD: 6 +.+.: rdma_nets.xa_lock ->pool_lock#2 FD: 1 BD: 4 +.+.: &____s->seqcount#10 FD: 2 BD: 3 +.+.: &(&net->ipv4.ping_group_range.lock)->lock ->&____s->seqcount#10 FD: 2 BD: 67 +.+.: &r->consumer_lock ->&r->producer_lock#2 FD: 1 BD: 96 +.-.: &r->producer_lock#2 FD: 47 BD: 77 +...: &dev_addr_list_lock_key#5 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->(console_sem).lock FD: 11 BD: 76 +...: &dev_addr_list_lock_key#6 ->&c->lock ->pool_lock#2 ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 40 BD: 74 +.-.: &br->hash_lock ->&____s->seqcount ->&c->lock ->pool_lock#2 ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&____s->seqcount#2 ->&n->list_lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 80 BD: 68 +.+.: j1939_netdev_lock ->fs_reclaim ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 
->&net->can.rcvlists_lock ->&obj_hash[i].lock ->&priv->lock ->&rq->__lock FD: 9 BD: 76 +...: &dev_addr_list_lock_key#7 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 7 BD: 67 +...: &bat_priv->tvlv.handler_list_lock ->pool_lock#2 ->&c->lock FD: 12 BD: 76 +...: &bat_priv->tvlv.container_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 19 BD: 76 +...: &batadv_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->krc.lock FD: 10 BD: 78 +...: &bat_priv->softif_vlan_list_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 FD: 20 BD: 77 +...: key#15 ->&bat_priv->softif_vlan_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 4 BD: 76 +...: &bat_priv->tt.changes_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 47 BD: 93 +...: &dev_addr_list_lock_key#8 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->(console_sem).lock ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 35 BD: 1 ..-.: &(&bat_priv->nc.work)->timer FD: 65 BD: 1 +.+.: (wq_completion)bat_events ->(work_completion)(&(&bat_priv->nc.work)->work) ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->orig_work)->work) ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->(work_completion)(&(&bat_priv->dat.work)->work) ->(work_completion)(&(&bat_priv->bla.work)->work) ->&rq->__lock ->(work_completion)(&barr->work) FD: 35 BD: 6 +.+.: (work_completion)(&(&bat_priv->nc.work)->work) ->key#16 ->key#17 ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->pool_lock#2 ->&rcu_state.gp_wq ->&cfs_rq->removed.lock FD: 1 BD: 7 +...: key#16 FD: 1 BD: 7 +...: key#17 FD: 207 BD: 68 +.+.: init_lock ->slab_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->crngs.lock ->&rq->__lock FD: 1 BD: 91 +.-.: deferred_lock FD: 1088 BD: 2 +.+.: deferred_process_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 58 BD: 71 +.-.: &br->lock ->&br->hash_lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&c->lock ->&____s->seqcount ->nl_table_lock ->nl_table_wait.lock ->&br->multicast_lock ->&n->list_lock FD: 1 BD: 70 +.+.: &bond->stats_lock/1 FD: 442 BD: 3 +.+.: &type->i_mutex_dir_key#12/1 ->&sem->wait_lock ->&rq->__lock ->&sb->s_type->i_lock_key#40 ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_mutex_key#26 ->&fsnotify_mark_srcu ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fiq->lock ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 38 BD: 7 +.+.: (work_completion)(&(&slave->notify_work)->work) ->&obj_hash[i].lock ->&base->lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 35 BD: 1 ..-.: &(&slave->notify_work)->timer FD: 47 BD: 20 +.+.: &type->i_mutex_dir_key#5/2 ->&simple_offset_lock_class ->tk_core.seq.seqcount ->rename_lock FD: 1 BD: 192 +.+.: btrfs-extent-00/3 FD: 474 BD: 1 +.+.: &newdev->mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#8 ->&mm->mmap_lock ->&____s->seqcount ->&c->lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#30 ->(console_sem).lock ->input_mutex FD: 1 BD: 6 ....: 
&fs_info->balance_wait_q FD: 42 BD: 72 +.+.: &hard_iface->bat_iv.ogm_buff_mutex ->crngs.lock ->pool_lock#2 ->batched_entropy_u8.lock ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->rcu_node_0 ->&rq->__lock ->&bat_priv->tt.commit_lock ->&bat_priv->tvlv.container_list_lock ->&____s->seqcount#2 ->&rcu_state.gp_wq ->&rcu_state.expedited_wq ->kfence_freelist_lock ->&cfs_rq->removed.lock FD: 15 BD: 73 +...: &bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 67 +...: _xmit_NONE FD: 1 BD: 67 +...: lock#9 FD: 1 BD: 68 ...-: &____s->seqcount#11 FD: 35 BD: 1 ..-.: &(&bat_priv->mcast.work)->timer FD: 40 BD: 2 +.+.: (work_completion)(&(&bat_priv->mcast.work)->work) ->&rq->__lock ->pool_lock#2 ->&bat_priv->mcast.mla_lock ->&obj_hash[i].lock ->&base->lock ->kfence_freelist_lock ->rcu_node_0 ->&meta->lock ->&rcu_state.expedited_wq ->quarantine_lock FD: 24 BD: 3 +.+.: &bat_priv->mcast.mla_lock ->pool_lock#2 ->key#15 ->&bat_priv->tt.changes_list_lock ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&c->lock ->&n->list_lock FD: 1 BD: 80 +.-.: &hsr->list_lock FD: 10 BD: 76 +...: &dev_addr_list_lock_key#9 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&n->list_lock FD: 18 BD: 67 +.-.: (&app->join_timer) ->&app->lock ->&list->lock#10 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 68 +.-.: &app->lock FD: 1 BD: 68 ..-.: &list->lock#10 FD: 17 BD: 67 +.-.: (&app->join_timer)#2 ->&app->lock#2 ->&list->lock#11 FD: 15 BD: 69 +.-.: &app->lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 68 ..-.: &list->lock#11 FD: 35 BD: 1 ..-.: &(&bat_priv->orig_work)->timer FD: 28 BD: 2 +.+.: (work_completion)(&(&bat_priv->orig_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 10 BD: 76 +...: &macvlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 69 +...: &qdisc_xmit_lock_key FD: 1 BD: 2 ....: &q->midi_sleep FD: 1 BD: 3 ....: &q->owner_lock FD: 83 BD: 2 +.+.: &type->i_mutex_dir_key#16 ->&rq->__lock ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->tomoyo_ss FD: 43 BD: 407 +.+.: &sb->s_type->i_lock_key#48 ->&dentry->d_lock FD: 1 BD: 1 ....: key#40 FD: 1 BD: 5 ....: (&priv->tlist) FD: 9 BD: 399 +.-.: prog_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 12 BD: 399 +...: link_idr_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 69 +...: device_spinlock FD: 29 BD: 1 -...: &x->wait#47 ->&p->pi_lock FD: 98 BD: 171 +.+.: &dqp->q_qlock ->&lru->node[i].lock ->&x->wait#28 ->semaphore->lock#2 ->&wq->mutex ->&cil->xc_push_lock ->&rq->__lock ->&x->wait#10 ->&log->l_icloglock ->semaphore->lock#3 ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&bp->b_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&xfs_dquot_group_class ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&ailp->ail_lock ->&cil->xc_ctx_lock ->&dqp->q_qlock/1 ->remove_cache_srcu FD: 30 BD: 7 +.+.: &m->req_lock ->&req->wq FD: 29 BD: 8 ....: &req->wq ->&p->pi_lock FD: 110 BD: 6 +.+.: &jfs_ip->commit_mutex/1 ->&jfs_ip->xattr_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->jfsTxnLock ->&sb->s_type->i_lock_key#50 ->&wb->list_lock ->tk_core.seq.seqcount ->lock#4 ->&mp->wait ->&(log)->loglock ->&(log)->gclock ->&bmp->db_bmaplock ->&jfs_ip->rdwrlock#2/2 ->&____s->seqcount 
->stock_lock ->&xa->xa_lock#9 ->&folio_wait_table[i] ->&rq->__lock FD: 111 BD: 5 +.+.: &jfs_ip->commit_mutex ->&jfs_ip->commit_mutex/1 FD: 95 BD: 5 .+.+: ehci_cf_port_reset_rwsem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&c->lock ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&queue->lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->(&timer.timer) ->&hub->status_mutex ->device_state_lock FD: 28 BD: 7 +.+.: usbfs_mutex ->&rq->__lock FD: 1 BD: 5 +.+.: &sn->gssp_lock FD: 1 BD: 8 +.+.: &cd->hash_lock FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#34 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#34 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 106 BD: 3 +.+.: &port_dev->status_lock ->&hub->status_mutex ->hcd->address0_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&c->lock ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&x->wait#19 ->&base->lock ->&rq->__lock ->(&timer.timer) FD: 93 BD: 3 +.+.: fsverity_hash_alg_init_mutex ->crypto_alg_sem ->fs_reclaim ->&c->lock ->pool_lock#2 ->(console_sem).lock FD: 1 BD: 67 ....: (&mrt->ipmr_expire_timer) FD: 1 BD: 5 ....: (&ipvs->dest_trash_timer) FD: 1 BD: 67 +...: &sch->root_lock_key#801 FD: 187 BD: 67 +.+.: team->team_lock_key#23 ->fs_reclaim ->&c->lock ->netpoll_srcu ->net_rwsem ->&tn->lock ->&dev_addr_list_lock_key#8 ->&dir->lock#2 ->input_pool.lock ->&n->list_lock ->&rq->__lock ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock FD: 3 BD: 67 ....: &dtab->index_lock ->stock_lock ->pool_lock#2 FD: 1 BD: 4 ....: &(&sbi->mdb_work)->timer FD: 1 BD: 2 +...: slock-AF_IEEE802154 FD: 30 BD: 1 +.+.: sk_lock-AF_IEEE802154 ->&rq->__lock ->slock-AF_IEEE802154 ->rcu_node_0 FD: 1 BD: 5 +.+.: quirk_mutex FD: 93 BD: 6 +.+.: &hdev->ll_open_lock ->&usbhid->mutex FD: 88 BD: 67 +.+.: bpf_dispatcher_xdp.mutex ->pack_mutex ->fs_reclaim ->pool_lock#2 ->&vn->pool_lock ->&vn->busy.lock ->&rq->__lock ->&____s->seqcount ->bpf_lock ->&obj_hash[i].lock ->&x->wait#2 FD: 1 BD: 67 +...: &sch->root_lock_key#767 FD: 96 BD: 2 +.+.: &sb->s_type->i_mutex_key#30/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->pool_lock#2 ->&sbi->lookup_lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#46 FD: 1 BD: 67 +...: &sch->root_lock_key#760 FD: 1 BD: 1 +.+.: &mousedev->client_lock FD: 451 BD: 1 +.+.: &runtime->oss.params_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&loopback->cable_lock ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount ->&runtime->buffer_mutex ->&group->lock#2 ->snd_pcm_link_rwsem ->&n->list_lock ->&mm->mmap_lock ->&runtime->sleep ->&____s->seqcount#2 FD: 91 BD: 3 +.+.: &runtime->buffer_mutex ->&group->lock#2 ->fs_reclaim ->pool_lock#2 
->&loopback->cable_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&rq->__lock ->&card->memory_mutex ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->pm_qos_lock ->(&dpcm->timer) ->&base->lock FD: 92 BD: 2 .+.+: snd_pcm_link_rwsem ->&runtime->buffer_mutex FD: 35 BD: 5 +.-.: (&dpcm->timer) ->&cable->lock ->&group->lock#2 FD: 81 BD: 1 .+.+: kn->active#50 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 85 BD: 1 ++++: kn->active#51 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&cgrp->pidlist_mutex FD: 1 BD: 2 ....: vga_user_lock FD: 1 BD: 1 +.+.: multicast_table.xa_lock FD: 44 BD: 8 ....: &usbhid->lock ->pool_lock#2 ->&dum_hcd->dum->lock FD: 80 BD: 172 +.+.: &xfs_dquot_group_class ->&lru->node[i].lock ->&x->wait#28 ->semaphore->lock#2 ->&wq->mutex ->&cil->xc_push_lock ->&log->l_icloglock ->&bp->b_lock ->&c->lock ->&cil->xc_ctx_lock ->&n->list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&ailp->ail_lock ->&rq->__lock FD: 29 BD: 15 ..-.: &lbuf->l_ioevent ->&p->pi_lock FD: 78 BD: 10 +.+.: &(log)->loglock ->&(log)->synclock ->&(log)->gclock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->jfsLCacheLock ->rcu_node_0 ->&sb->s_type->i_lock_key#50 ->&xa->xa_lock#9 ->lock#4 ->lock#5 ->&c->lock FD: 97 BD: 4 +.+.: &(imap->im_aglock[index]) ->&jfs_ip->rdwrlock/1 ->&mp->wait ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#50 ->&wb->list_lock FD: 98 BD: 7 ++++: &jfs_ip->xattr_sem ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#50 ->&wb->list_lock ->&jfs_ip->rdwrlock#2/2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&mp->wait ->rcu_node_0 ->&obj_hash[i].lock ->jfsTxnLock FD: 35 BD: 1 ..-.: drivers/net/wireguard/ratelimiter.c:20 FD: 30 BD: 2 +.+.: (gc_work).work ->tk_core.seq.seqcount ->"ratelimiter_table_lock" ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 FD: 1 BD: 3 +.+.: "ratelimiter_table_lock" FD: 9 BD: 76 +...: &dev_addr_list_lock_key#10 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 1 BD: 67 ....: &xa->xa_lock#18 FD: 79 BD: 5 +.+.: &loopback->cable_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&cable->lock ->&rq->__lock ->&c->lock ->&n->list_lock FD: 19 BD: 76 +...: &dev_addr_list_lock_key#10/1 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&c->lock ->&n->list_lock FD: 2 BD: 67 +.+.: &tap_major->minor_lock ->pool_lock#2 FD: 30 BD: 67 +.+.: subsys mutex#75 ->&k->k_lock ->&rq->__lock FD: 20 BD: 76 +...: &dev_addr_list_lock_key#11 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 14 ....: (&sl->outfill_timer) FD: 1108 BD: 1 .+.+: kn->active#45 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->&c->lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&rq->__lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1106 BD: 9 +.+.: nsim_bus_dev_list_lock ->fs_reclaim ->pool_lock#2 ->nsim_bus_dev_ids.xa_lock ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->&c->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->device_links_lock ->&rq->__lock ->nsim_bus_dev_list_lock.wait_lock ->deferred_probe_mutex ->subsys mutex#76 ->rcu_node_0 ->&rcu_state.expedited_wq ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock 
->mmu_notifier_invalidate_range_start ->&n->list_lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&lock->wait_lock FD: 1108 BD: 1 .+.+: kn->active#46 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock FD: 1 BD: 10 ....: nsim_bus_dev_ids.xa_lock FD: 2 BD: 20 +.+.: devlinks.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: nfnl_grp_active_lock FD: 11 BD: 20 +.+.: &xa->xa_lock#19 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 1 BD: 94 +...: &data->fib_event_queue_lock FD: 82 BD: 2 +.+.: (work_completion)(&data->fib_event_work) ->&data->fib_event_queue_lock ->&data->fib_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 80 BD: 3 +.+.: &data->fib_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->pool_lock ->&n->list_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock ->&meta->lock FD: 1 BD: 20 ....: &(&fn_net->fib_chain)->lock FD: 80 BD: 72 +.+.: bpf_devs_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->rcu_node_0 ->&rq->__lock ->&obj_hash[i].lock ->&____s->seqcount ->&n->list_lock FD: 1 BD: 67 +...: &devlink_port->type_lock FD: 1 BD: 10 +.+.: nsim_bus_dev_list_lock.wait_lock FD: 1 BD: 67 +.+.: &vn->sock_lock FD: 35 BD: 1 ..-.: &(&forw_packet_aggr->delayed_work)->timer FD: 47 BD: 2 +.+.: (work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->&hard_iface->bat_iv.ogm_buff_mutex ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->quarantine_lock ->&meta->lock FD: 28 BD: 10 +.+.: subsys mutex#76 ->&rq->__lock FD: 1 BD: 43 ....: &vblank->work_wait_queue FD: 35 BD: 1 ..-.: &(&nsim_dev->trap_data->trap_report_dw)->timer FD: 36 BD: 2 +.+.: (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 20 BD: 20 +.+.: &nsim_trap_data->trap_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->crngs.lock ->&nsim_dev->fa_cookie_lock ->&obj_hash[i].lock ->&n->list_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&base->lock FD: 1 BD: 21 +...: &nsim_dev->fa_cookie_lock FD: 35 BD: 1 ..-.: &(&hwstats->traffic_dw)->timer FD: 32 BD: 2 +.+.: (work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 31 BD: 73 +.+.: &hwstats->hwsdev_list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 656 BD: 67 +.+.: &wg->device_update_lock ->&rq->__lock ->&wg->static_identity.lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->pool_lock#2 ->pcpu_alloc_mutex ->&handshake->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->bh_lock ->&table->lock ->&peer->endpoint_lock ->pool_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->mmu_notifier_invalidate_range_start ->&dir->lock ->k-slock-AF_INET/1 ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->cpu_hotplug_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&wg->socket_update_lock ->rcu_node_0 ->&list->lock#14 ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&zone->lock ->&rnp->exp_wq[1] ->&n->list_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&x->wait#2 ->&table->hash[i].lock ->k-clock-AF_INET ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 
->&fsnotify_mark_srcu ->k-clock-AF_INET6 ->(&peer->timer_retransmit_handshake) ->&base->lock ->(&peer->timer_send_keepalive) ->(&peer->timer_new_handshake) ->(&peer->timer_zero_key_material) ->(&peer->timer_persistent_keepalive) ->&peer->keypairs.keypair_update_lock ->&wq->mutex ->napi_hash_lock ->&table->lock#2 ->wq_pool_mutex ->wq_mayday_lock ->&p->pi_lock ->&x->wait ->pcpu_lock ->&r->consumer_lock#2 ->rcu_state.barrier_mutex ->init_lock ->rcu_state.exp_mutex.wait_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->rcu_state.barrier_mutex.wait_lock ->&rcu_state.gp_wq ->remove_cache_srcu FD: 77 BD: 114 ++++: &wg->static_identity.lock ->&rq->__lock ->&handshake->lock ->&sem->wait_lock ->&p->pi_lock FD: 75 BD: 115 ++++: &handshake->lock ->crngs.lock ->&rq->__lock ->tk_core.seq.seqcount ->&table->lock#2 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->remove_cache_srcu ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&sem->wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 28 BD: 68 +.+.: &table->lock ->&rq->__lock FD: 65 BD: 117 ++-.: &peer->endpoint_lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount FD: 1 BD: 67 +...: &dev_addr_list_lock_key#12 FD: 1 BD: 67 +...: &bpq_netdev_addr_lock_key FD: 19 BD: 76 +...: &dev_addr_list_lock_key#6/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 44 BD: 67 +.-.: (&brmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 43 BD: 90 +.-.: &br->multicast_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 44 BD: 67 +.-.: (&brmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 358 BD: 1 +.+.: &type->s_umount_key#78/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&c->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->(console_sem).lock ->&____s->seqcount#2 ->&xa->xa_lock#5 ->bit_wait_table + i ->&sb->s_type->i_lock_key#47 ->&dentry->d_lock FD: 65 BD: 1 +.-.: (&in_dev->mr_ifc_timer) ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock FD: 1 BD: 7 +.+.: genl_mutex.wait_lock FD: 1 BD: 69 +.+.: tcpv6_prot_mutex FD: 1 BD: 67 +...: &dev_addr_list_lock_key#13 FD: 11 BD: 70 +.-.: &qdisc_xmit_lock_key#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock FD: 35 BD: 1 ..-.: &(&br->gc_work)->timer FD: 17 BD: 67 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&qdisc_xmit_lock_key#3 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 42 BD: 2 +.+.: (work_completion)(&(&br->gc_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 FD: 1 BD: 67 +...: &dev_addr_list_lock_key#14 FD: 35 BD: 1 ..-.: &(&hdev->cmd_timer)->timer FD: 48 BD: 6 +.+.: (work_completion)(&(&hdev->cmd_timer)->work) ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->rcu_node_0 ->&hdev->req_wait_q FD: 48 BD: 77 +...: &dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#8 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->pool_lock#2 ->&n->list_lock FD: 208 BD: 4 ++++: &sb->s_type->i_mutex_key#20/4 ->mapping.invalidate_lock#3 
FD: 205 BD: 158 ++++: mapping.invalidate_lock#3/1 ->&xfs_nondir_ilock_class#3 ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->sb_internal#2 ->&obj_hash[i].lock ->&c->lock ->&mapping->i_mmap_rwsem ->&ip->i_flags_lock ->&sb->s_type->i_lock_key#35 ->&wb->list_lock ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->&ifs->state_lock ->lock#4 ->lock#5 ->&folio_wait_table[i] ->&rq->__lock FD: 1 BD: 67 +...: &sch->root_lock_key#811 FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#21 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 37 BD: 68 +.+.: &tp->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 67 +.+.: &head->masks_lock FD: 80 BD: 2 +.+.: (work_completion)(&(rwork)->work) ->&obj_hash[i].lock ->pool_lock#2 ->&ht->mutex FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#42 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 1 BD: 589 +.+.: &dquot->dq_dqb_lock FD: 78 BD: 12 +.+.: fscrypt_init_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount FD: 94 BD: 12 .+.+: &mk->mk_sem ->crypto_alg_sem ->fs_reclaim ->&c->lock ->pool_lock#2 ->(console_sem).lock ->&mk->mk_decrypted_inodes_lock ->&rq->__lock ->&n->list_lock FD: 1 BD: 16 +.+.: &mk->mk_decrypted_inodes_lock FD: 38 BD: 183 +.+.: &ea_inode->i_rwsem#8/1 ->mmu_notifier_invalidate_range_start ->&ei->i_raw_lock FD: 3 BD: 183 ++++: &ei->i_data_sem/3 ->key#3 ->key#14 FD: 1 BD: 1 +.-.: x25_forward_list_lock FD: 1 BD: 1 ....: _rs.lock#6 FD: 48 BD: 77 +...: &dev_addr_list_lock_key#7/1 ->&dev_addr_list_lock_key#8 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock ->pool_lock#2 FD: 14 BD: 80 +.-.: &qdisc_xmit_lock_key#3 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 15 BD: 67 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#3 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 44 BD: 1 +.-.: (&pmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 44 BD: 1 +.-.: (&pmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 2 BD: 216 ..-.: &list->lock#12 ->process_queue_bh_lock FD: 1 BD: 217 +.-.: process_queue_bh_lock FD: 16 BD: 67 +.-.: (&app->periodic_timer) ->&app->lock#2 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#4 FD: 317 BD: 2 +.+.: ((tcp_md5_needed).work).work ->cpu_hotplug_lock FD: 48 BD: 76 +...: _xmit_ETHER/1 ->&dev_addr_list_lock_key#8 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->pool_lock#2 ->&n->list_lock FD: 25 BD: 78 +.-.: &hsr->seqnr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&base->lock FD: 1 BD: 79 +.-.: &new_node->seq_out_lock FD: 26 BD: 67 +.-.: (&hsr->announce_timer) FD: 1 BD: 67 +.+.: &nn->netlink_tap_lock FD: 19 BD: 76 +...: &batadv_netdev_addr_lock_key/1 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 49 BD: 76 +...: &dev_addr_list_lock_key#9/1 ->&dev_addr_list_lock_key#8 ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&dev_addr_list_lock_key ->&c->lock ->&n->list_lock FD: 49 BD: 76 +...: 
&macvlan_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#8 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 ->&dev_addr_list_lock_key#5 ->pool_lock#2 FD: 19 BD: 68 +...: &ipvlan->addrs_lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock FD: 1 BD: 3 +.-.: &list->lock#13 FD: 34 BD: 2 +.+.: (work_completion)(&port->bc_work) ->&list->lock#13 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock FD: 48 BD: 76 +...: &macsec_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#8 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&____s->seqcount#2 ->&n->list_lock FD: 19 BD: 79 +.-.: key#18 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 25 BD: 73 +...: &bat_priv->tt.commit_lock ->key#15 ->&bat_priv->softif_vlan_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 9 BD: 74 +...: &bat_priv->tt.last_changeset_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 15 BD: 67 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#4 FD: 1 BD: 79 +.-.: &entry->crc_lock FD: 28 BD: 68 +.+.: &wg->socket_update_lock ->&rq->__lock FD: 4 BD: 101 +.-.: &list->lock#14 ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 69 +...: &this->info_list_lock FD: 77 BD: 16 +.+.: (work_completion)(&peer->transmit_handshake_work) ->tk_core.seq.seqcount ->&wg->static_identity.lock ->&cookie->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&peer->endpoint_lock ->batched_entropy_u8.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->&____s->seqcount#2 ->kfence_freelist_lock FD: 1 BD: 117 +...: &table->lock#2 FD: 28 BD: 47 ++++: &cookie->lock ->&rq->__lock FD: 1 BD: 67 +...: &sch->root_lock_key#768 FD: 1 BD: 4 ....: &rs->rs_recv_lock FD: 1 BD: 101 +.-.: &r->producer_lock#3 FD: 111 BD: 30 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&r->consumer_lock#2 ->&wg->static_identity.lock ->&peer->endpoint_lock ->tk_core.seq.seqcount ->&cookie->lock ->&handshake->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&list->lock#14 ->&c->lock ->&____s->seqcount ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 98 +.+.: &r->consumer_lock#2 FD: 5 BD: 116 +.-.: &peer->keypairs.keypair_update_lock ->&table->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 +...: clock-AF_RDS FD: 28 BD: 16 +.+.: (work_completion)(&peer->transmit_packet_work) ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&peer->endpoint_lock ->batched_entropy_u8.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 67 +...: &sch->root_lock_key#775 FD: 1 BD: 1 +.-.: &keypair->receiving_counter.lock FD: 31 BD: 67 +.+.: &caifn->caifdevs.lock ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->pool_lock#2 ->&this->info_list_lock FD: 81 BD: 1 +.+.: (wq_completion)inet_frag_wq ->(work_completion)(&fqdir->destroy_work) FD: 1 BD: 5 +...: &bat_priv->gw.list_lock FD: 1 BD: 5 +.+.: &hn->hn_lock FD: 1 BD: 5 +.+.: &pnetids_ndev->lock FD: 1 BD: 28 +.+.: &data->mtx FD: 1 BD: 67 +...: &sch->root_lock_key#786 FD: 1 BD: 5 +...: key#35 FD: 28 BD: 5 +.+.: 
&bat_priv->bat_v.ogm_buff_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 71 ....: &list->lock#26 FD: 2 BD: 5 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 67 +...: &sch->root_lock_key#750 FD: 186 BD: 3 +.+.: &type->i_mutex_dir_key#11/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->stock_lock ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&rq->__lock ->tomoyo_ss ->&s->s_inode_list_lock ->&nm_i->nid_list_lock ->batched_entropy_u32.lock ->inode_hash_lock ->&sbi->cp_rwsem ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#41 FD: 1 BD: 67 +...: &sch->root_lock_key#734 FD: 1 BD: 1 +.+.: &sbi->quota_sem FD: 1 BD: 72 ....: &wdev->event_lock FD: 1 BD: 72 ....: (&dwork->timer) FD: 1 BD: 72 ....: (&dwork->timer)#2 FD: 1 BD: 73 ..-.: &list->lock#15 FD: 1 BD: 72 +.-.: &ifibss->incomplete_lock FD: 1090 BD: 1 +.+.: (wq_completion)cfg80211 ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->(work_completion)(&barr->work) FD: 451 BD: 2 +.+.: (work_completion)(&rdev->event_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock FD: 36 BD: 2 +.+.: wireless_nlevent_work ->net_rwsem ->&rq->__lock FD: 1 BD: 103 +.-.: &local->active_txq_lock[i] FD: 43 BD: 102 +.-.: &local->handle_wake_tx_queue_lock ->&local->active_txq_lock[i] ->&local->queue_stop_reason_lock ->&fq->lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#16 FD: 1 BD: 103 ..-.: &local->queue_stop_reason_lock FD: 1 BD: 105 ..-.: &list->lock#16 FD: 38 BD: 1 +.-.: &local->rx_path_lock ->&obj_hash[i].lock ->pool_lock#2 ->&list->lock#15 ->&rdev->wiphy_work_lock ->&rdev->mgmt_registrations_lock FD: 19 BD: 72 +...: &sta->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 19 BD: 72 +.-.: &sta->rate_ctrl_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 39 BD: 1 +.+.: (wq_completion)bond0#21 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 1 BD: 2 +.+.: &iopt->iova_rwsem FD: 124 BD: 1 +.+.: &type->s_umount_key#48/1 ->fs_reclaim ->&c->lock ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#33 ->binderfs_minors_mutex ->&dentry->d_lock ->&sb->s_type->i_mutex_key#18 ->&____s->seqcount#2 ->&rq->__lock ->rcu_node_0 ->&n->list_lock FD: 43 BD: 4 +.+.: &sb->s_type->i_lock_key#33 ->&dentry->d_lock FD: 29 BD: 3 +.+.: binderfs_minors_mutex ->binderfs_minors.xa_lock ->&rq->__lock FD: 1 BD: 4 ....: binderfs_minors.xa_lock FD: 112 BD: 2 +.+.: &sb->s_type->i_mutex_key#18 ->&sb->s_type->i_lock_key#33 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&rq->__lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 11 +.+.: iunique_lock FD: 833 BD: 2 +.+.: &type->i_mutex_dir_key#6/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock FD: 80 BD: 1 .+.+: kn->active#47 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 80 BD: 1 ++++: kn->active#48 ->fs_reclaim 
->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->&rq->__lock FD: 3 BD: 142 ..-.: cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 119 BD: 1 +.+.: &type->s_umount_key#76/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&n->list_lock ->&sb->s_type->i_lock_key#46 ->&dentry->d_lock FD: 1 BD: 14 ....: (&sl->keepalive_timer) FD: 43 BD: 5 +.+.: &sb->s_type->i_lock_key#46 ->&dentry->d_lock FD: 78 BD: 68 +.+.: &block->lock ->fs_reclaim ->pool_lock#2 FD: 9 BD: 67 +.+.: &xa->xa_lock#26 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 101 BD: 1 .+.+: sb_writers#21 ->mount_lock ->&sb->s_type->i_mutex_key#30/1 FD: 1 BD: 275 +.+.: pcpu_alloc_mutex.wait_lock FD: 1 BD: 14 ....: &TxBlock[k].gcwait FD: 29 BD: 142 ....: cgroup_threadgroup_rwsem.waiters.lock ->&p->pi_lock FD: 28 BD: 18 +.+.: (wq_completion)cpuset_migrate_mm ->&rq->__lock FD: 833 BD: 3 +.+.: &type->i_mutex_dir_key#7/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->cgroup_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&sem->wait_lock ->pool_lock#2 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&c->lock ->rcu_node_0 FD: 85 BD: 1 ++++: kn->active#49 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->stock_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&cgrp->pidlist_mutex ->&rq->__lock ->rcu_node_0 FD: 1 BD: 143 ....: cpuset_attach_wq.lock FD: 1 BD: 990 -.-.: stock_lock FD: 1 BD: 76 +.+.: rcu_state.exp_mutex.wait_lock FD: 82 BD: 67 ++++: &block->cb_lock ->flow_indr_block_lock ->&tp->lock FD: 78 BD: 68 +.+.: flow_indr_block_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 +...: btf_idr_lock FD: 1 BD: 6 ....: &per_cpu(xt_recseq, i) FD: 319 BD: 5 +.+.: nf_nat_proto_mutex ->fs_reclaim ->nf_nat_proto_mutex.wait_lock ->&rq->__lock ->pool_lock#2 ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->stock_lock ->krc.lock ->&n->list_lock ->rcu_node_0 FD: 1 BD: 6 +.+.: nf_nat_proto_mutex.wait_lock FD: 177 BD: 1 +.+.: loop_validate_mutex ->&lo->lo_mutex FD: 1 BD: 77 .+.-: &table->lock#3 FD: 1 BD: 120 +.-.: &nf_nat_locks[i] FD: 35 BD: 1 +.-.: (&sdp->delay_work) FD: 35 BD: 1 ..-.: &(&dm_bufio_cleanup_old_work)->timer FD: 16 BD: 1 +.+.: (wq_completion)dm_bufio_cache ->(work_completion)(&(&dm_bufio_cleanup_old_work)->work) FD: 15 BD: 2 +.+.: (work_completion)(&(&dm_bufio_cleanup_old_work)->work) ->dm_bufio_clients_lock ->&obj_hash[i].lock ->&base->lock FD: 449 BD: 67 +.+.: sk_lock-AF_TIPC ->slock-AF_TIPC ->&tn->nametbl_lock ->&tipc_net(net)->bclock ->&obj_hash[i].lock ->pool_lock#2 ->&srv->idr_lock ->&con->sub_lock ->&list->lock#17 ->rcu_node_0 ->&rq->__lock ->&con->outqueue_lock ->clock-AF_TIPC ->&base->lock ->fs_reclaim ->&c->lock ->&ei->socket.wq.wait ->&mm->mmap_lock ->&n->list_lock FD: 34 BD: 68 +...: slock-AF_TIPC ->&list->lock#17 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&list->lock#18 ->&base->lock FD: 1 BD: 77 +...: &nt->cluster_scope_lock FD: 1 BD: 68 +...: &tipc_net(net)->bclock FD: 4 BD: 79 +...: &srv->idr_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 42 BD: 75 +...: &con->sub_lock ->&tn->nametbl_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 37 BD: 78 +...: &sub->lock ->&srv->idr_lock ->pool_lock#2 
->&con->outqueue_lock FD: 4 BD: 80 +...: &con->outqueue_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 75 +...: &list->lock#17 FD: 13 BD: 1 +.+.: (work_completion)(&con->swork) ->&con->outqueue_lock ->pool_lock#2 ->&list->lock#18 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 70 +...: &list->lock#18 FD: 1 BD: 68 +...: clock-AF_TIPC FD: 1 BD: 76 ..-.: key#38 FD: 5 BD: 72 ..-.: &local->ack_status_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 455 BD: 67 +.+.: sk_lock-AF_CAN ->slock-AF_CAN ->j1939_netdev_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&priv->lock ->&priv->j1939_socks_lock ->&jsk->sk_session_queue_lock ->&mm->mmap_lock ->&____s->seqcount ->&list->lock#19 ->&obj_hash[i].lock ->&priv->active_session_list_lock ->hrtimer_bases.lock ->&rq->__lock ->&____s->seqcount#2 ->&n->list_lock ->&jsk->waitq ->clock-AF_CAN ->rcu_node_0 ->&rcu_state.expedited_wq ->remove_cache_srcu ->&jsk->filters_lock ->&x->wait#2 ->pcpu_lock FD: 1 BD: 68 +...: slock-AF_CAN FD: 1 BD: 69 +...: &net->can.rcvlists_lock FD: 1 BD: 69 ++.-: &priv->lock FD: 34 BD: 69 ++.-: &priv->j1939_socks_lock ->&jsk->filters_lock ->pool_lock#2 ->rlock-AF_CAN ->&jsk->sk_session_queue_lock FD: 31 BD: 69 +.-.: &jsk->sk_session_queue_lock ->&list->lock#19 ->&obj_hash[i].lock ->pool_lock#2 ->&jsk->waitq FD: 1 BD: 71 ..-.: &list->lock#19 FD: 53 BD: 68 +.-.: &priv->active_session_list_lock ->(console_sem).lock ->hrtimer_bases.lock ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#12 ->&priv->j1939_socks_lock ->&list->lock#19 ->&obj_hash[i].lock ->&zone->lock FD: 18 BD: 71 +.-.: _xmit_NONE#2 ->pool_lock#2 ->&obj_hash[i].lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 6 +.-.: &jsk->filters_lock FD: 367 BD: 1 +.+.: &type->s_umount_key#49/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->nls_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->rcu_node_0 ->&rq->__lock ->tk_core.seq.seqcount ->&base->lock ->bit_wait_table + i ->&c->lock ->&____s->seqcount#2 ->&sb->s_type->i_lock_key#34 ->&folio_wait_table[i] ->&wb->list_lock ->&wb->work_lock ->&tree->tree_lock ->&dentry->d_lock ->&n->list_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&s->s_inode_list_lock ->&tree->hash_lock ->lock#5 ->&lruvec->lru_lock ->&fsnotify_mark_srcu ->&x->wait#23 ->&sb->map[i].swap_lock FD: 35 BD: 320 ..-.: &lo->lo_work_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock FD: 458 BD: 1 +.+.: (wq_completion)loop4 ->(work_completion)(&worker->work) ->(work_completion)(&lo->rootcg_work) FD: 456 BD: 6 +.+.: (work_completion)(&worker->work) ->&lo->lo_work_lock ->tk_core.seq.seqcount ->&p->pi_lock ->lock#4 ->sb_writers#5 ->&rq->__lock ->rcu_node_0 ->&sb->s_type->i_mutex_key#13 FD: 1 BD: 510 ....: &shmem_falloc_waitq FD: 9 BD: 400 +...: map_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 2 BD: 1 ....: &ctx->fault_pending_wqh ->&ctx->fault_wqh FD: 1 BD: 2 ....: &ctx->fault_wqh FD: 1 BD: 1 ....: &ctx->event_wqh FD: 1 BD: 1 ....: &ctx->fd_wqh FD: 456 BD: 6 +.+.: (work_completion)(&lo->rootcg_work) ->&lo->lo_work_lock ->lock#4 ->tk_core.seq.seqcount ->&p->pi_lock ->sb_writers#5 ->&rq->__lock ->rcu_node_0 ->&sb->s_type->i_mutex_key#13 ->&rcu_state.gp_wq ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1098 BD: 2 +.+.: 
(work_completion)(&map->work) ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->&htab->buckets[i].lock ->stock_lock ->pool_lock#2 ->&vn->busy.lock ->&vn->lazy.lock ->dev_map_lock ->rcu_state.barrier_mutex ->pcpu_lock ->&zone->lock ->&cfs_rq->removed.lock ->rcu_node_0 ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->cgroup_mutex FD: 59 BD: 582 +.+.: &sb->s_type->i_lock_key#34 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 29 BD: 70 ..-.: &jsk->waitq ->&p->pi_lock FD: 129 BD: 8 +.+.: &tree->tree_lock ->&tree->hash_lock ->fs_reclaim ->pool_lock#2 ->lock#4 ->&node->lock_wq ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->inode_hash_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#34 ->&obj_hash[i].lock ->&HFS_I(tree->inode)->extents_lock ->&wb->list_lock ->&sbi->work_lock ->&mapping->i_private_lock ->&xa->xa_lock#9 ->tk_core.seq.seqcount FD: 5 BD: 12 +.+.: &tree->hash_lock ->lock#4 FD: 1 BD: 11 ....: &node->lock_wq FD: 1 BD: 3 +...: &htab->buckets[i].lock FD: 153 BD: 2 .+.+: sb_writers#11 ->mount_lock ->&type->i_mutex_dir_key#8 ->&sb->s_type->i_mutex_key#19 ->&sb->s_type->i_lock_key#34 ->&wb->list_lock ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->(console_sem).lock ->&obj_hash[i].lock ->lock#4 ->lock#5 ->&f->f_lock ->tomoyo_ss ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&rq->__lock FD: 115 BD: 3 ++++: &type->i_mutex_dir_key#8 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->pool_lock#2 ->&tree->tree_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->inode_hash_lock ->&sb->s_type->i_lock_key#34 ->&wb->list_lock ->&sbi->work_lock ->&rq->__lock ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 14 BD: 15 +.+.: &sbi->work_lock ->&obj_hash[i].lock ->&base->lock FD: 16 BD: 9 +.+.: &HFS_I(tree->inode)->extents_lock ->&sbi->bitmap_lock FD: 15 BD: 12 +.+.: &sbi->bitmap_lock ->&sbi->work_lock FD: 20 BD: 1 +.-.: (&tun->flow_gc_timer) ->&tun->lock FD: 19 BD: 68 +.-.: &tun->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 136 BD: 3 +.+.: &sb->s_type->i_mutex_key#19 ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mapping->i_private_lock ->&HFS_I(inode)->extents_lock ->&sbi->work_lock ->&sb->s_type->i_lock_key#34 ->lock#5 ->&c->lock ->&____s->seqcount#2 ->&mapping->i_mmap_rwsem ->&obj_hash[i].lock ->&lruvec->lru_lock ->&tree->tree_lock ->&wb->list_lock ->&(&sbi->mdb_work)->timer ->&base->lock ->&x->wait#10 ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&folio_wait_table[i] FD: 119 BD: 4 +.+.: &HFS_I(inode)->extents_lock ->&sbi->bitmap_lock ->fs_reclaim ->pool_lock#2 ->&tree->tree_lock/1 ->&c->lock FD: 118 BD: 5 +.+.: &tree->tree_lock/1 ->&tree->hash_lock ->fs_reclaim ->pool_lock#2 ->lock#4 ->&node->lock_wq ->&mapping->i_private_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#34 ->&wb->list_lock ->&obj_hash[i].lock ->&sbi->bitmap_lock ->&sbi->work_lock ->(console_sem).lock FD: 1 BD: 1 ....: _rs.lock FD: 84 BD: 5 +.+.: hashlimit_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&ent->pde_unload_lock FD: 38 BD: 68 +.-.: (&peer->timer_persistent_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock 
->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 15 BD: 67 +.-.: (&hsr->prune_timer) ->&hsr->list_lock ->&obj_hash[i].lock ->&base->lock FD: 1090 BD: 1 +.+.: ppp_mutex ->pcpu_alloc_mutex ->&pn->all_ppp_mutex ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#4 ->net_rwsem ->&tn->lock ->&x->wait#8 ->&obj_hash[i].lock ->&c->lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->nl_table_lock ->nl_table_wait.lock ->subsys mutex#19 ->&dir->lock#2 ->&n->list_lock ->&rq->__lock ->dev_hotplug_mutex ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->stock_lock ->sysctl_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&mm->mmap_lock ->&pn->all_channels_lock ->&pch->upl ->stack_depot_init_mutex ->rtnl_mutex ->&ppp->wlock ->&pch->chan_sem ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 84 BD: 67 +.+.: &pn->all_ppp_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&pch->upl FD: 1 BD: 1 +...: &hinfo->lock FD: 1 BD: 1 ....: &group->fanotify_data.access_waitq FD: 23 BD: 1 +.+.: &nofs_marks_lock ->&fsnotify_mark_srcu ->ucounts_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->stock_lock ->&mark->lock ->&conn->lock FD: 35 BD: 1 ..-.: &(&conn->info_timer)->timer FD: 2 BD: 2 +.+.: (work_completion)(&(&conn->info_timer)->work) ->&conn->chan_lock FD: 31 BD: 71 +...: &ppp->rlock ->&pch->downl ->&obj_hash[i].lock ->pool_lock#2 ->&pf->rwait FD: 33 BD: 70 +...: &ppp->wlock ->&ppp->rlock ->&list->lock#26 ->&pf->rwait FD: 133 BD: 1 +.+.: &type->s_umount_key#50 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#34 ->&dentry->d_lock/1 ->pool_lock#2 ->&wb->work_lock ->&bdi->wb_waitq ->&rq->__lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&mapping->i_private_lock ->lock#4 ->lock#5 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&s->s_inode_list_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&base->lock ->&tree->hash_lock ->stock_lock ->&lruvec->lru_lock FD: 29 BD: 20 ....: &bdi->wb_waitq ->&p->pi_lock FD: 39 BD: 17 +.+.: &bdi->wb_switch_rwsem ->&bdi->wb_waitq ->&rq->__lock ->rcu_node_0 ->&rcu_state.gp_wq FD: 69 BD: 17 +.+.: &s->s_sync_lock ->&sb->s_type->i_lock_key#35 ->&folio_wait_table[i] ->&rq->__lock ->lock#4 ->lock#5 ->rcu_node_0 FD: 1 BD: 29 ....: &lo->lo_lock FD: 1 BD: 6 ..-.: rlock-AF_CAN FD: 1 BD: 68 +...: clock-AF_CAN FD: 1 BD: 1 +.+.: hash_lock FD: 43 BD: 1 -.-.: &new_timer->it_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 28 BD: 72 +.+.: &net->xdp.lock ->&rq->__lock FD: 1 BD: 72 +.+.: mirred_list_lock FD: 121 BD: 74 +.+.: &nft_net->commit_mutex ->&rq->__lock ->fs_reclaim ->stock_lock ->&____s->seqcount ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->rcu_node_0 ->&x->wait#2 ->&ht->mutex ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->key ->pcpu_lock ->percpu_counters_lock ->nl_table_lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->&p->alloc_lock ->(console_sem).lock FD: 13 BD: 72 +...: &idev->mc_query_lock ->&obj_hash[i].lock FD: 1 BD: 72 +...: &idev->mc_report_lock FD: 3 BD: 1 +.+.: &card->files_lock ->shutdown_lock ->&card->remove_sleep FD: 29 BD: 72 +.+.: 
&pnn->pndevs.lock ->&rq->__lock ->rcu_node_0 FD: 28 BD: 72 +.+.: &pnn->routes.lock ->&rq->__lock FD: 1 BD: 67 +...: &dev_addr_list_lock_key#15 FD: 30 BD: 72 +.+.: &dev->ethtool->rss_lock ->&xa->xa_lock#4 ->&rq->__lock ->rcu_node_0 FD: 29 BD: 2 ....: &pcm->open_wait ->&p->pi_lock FD: 91 BD: 1 +.+.: &pcm->open_mutex ->&card->ctl_files_rwlock ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&pcm->open_wait ->&timer->lock ->&loopback->cable_lock ->&group->lock#2 ->&card->memory_mutex ->&vn->busy.lock ->&vn->lazy.lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq ->(&dpcm->timer) ->&base->lock ->pm_qos_lock ->&c->lock ->&n->list_lock ->quarantine_lock ->stock_lock FD: 29 BD: 72 ....: &pf->rwait ->pool_lock#2 ->&p->pi_lock FD: 1 BD: 2 +.+.: shutdown_lock FD: 44 BD: 69 +.+.: rcu_state.barrier_mutex ->rcu_state.barrier_lock ->&x->wait#26 ->&rq->__lock ->rcu_state.barrier_mutex.wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 ->stock_lock FD: 29 BD: 70 ..-.: &x->wait#26 ->&p->pi_lock FD: 1 BD: 1 ....: &card->power_ref_sleep FD: 1 BD: 12 ..-.: &timer->lock FD: 1 BD: 1 ....: &ctl->read_lock FD: 1 BD: 21 ....: netdev_unregistering_wq.lock FD: 1 BD: 2 ....: &card->remove_sleep FD: 411 BD: 1 +.+.: &type->s_umount_key#51/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&c->lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->percpu_counters_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->wq_pool_mutex ->&____s->seqcount#2 ->&zone->lock ->&rq->__lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&base->lock ->&x->wait#27 ->semaphore->lock#2 ->&bp->b_lock ->key#19 ->key#20 ->key#21 ->key#22 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->batched_entropy_u32.lock ->(console_sem).lock ->&n->list_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&x->wait#25 ->(&timer.timer) ->&sb->map[i].swap_lock ->&bch->bc_lock ->&ip->i_flags_lock ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#35 ->&xfs_nondir_ilock_class/6 ->&xfs_nondir_ilock_class#2/7 ->&mp->m_sb_lock ->key#23 ->&wq->mutex ->&lru->node[i].lock ->sb_internal#2 ->&x->wait#28 ->&xfs_nondir_ilock_class#3 ->&x->wait#8 ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#8 ->uevent_sock_mutex ->&pctl->poll_wait ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->&x->wait#10 ->&qinf->qi_tree_lock ->&cil->xc_ctx_lock ->&dentry->d_lock ->rcu_node_0 ->remove_cache_srcu ->&bp->b_waiters ->wq_pool_mutex.wait_lock ->&rcu_state.expedited_wq FD: 29 BD: 170 ....: &x->wait#27 ->&p->pi_lock FD: 458 BD: 1 +.+.: (wq_completion)loop2 ->(work_completion)(&worker->work) ->(work_completion)(&lo->rootcg_work) ->&rq->__lock FD: 91 BD: 2 +.+.: (work_completion)(&(&ctx->free_rwork)->work) ->&mapping->i_mmap_rwsem ->&sb->s_type->i_lock_key#18 ->lock#4 ->lock#5 ->&mapping->i_private_lock ->&obj_hash[i].lock ->&base->lock ->&lruvec->lru_lock ->pcpu_lock ->percpu_ref_switch_lock ->pool_lock#2 FD: 85 BD: 1 +.+.: (work_completion)(&bp->b_ioend_work) ->&x->wait#27 ->&ailp->ail_lock ->&bp->b_lock ->&obj_hash[i].lock ->pool_lock#2 ->&x->wait#28 ->semaphore->lock#2 ->&ip->i_flags_lock ->&iip->ili_lock ->&rq->__lock ->(console_sem).lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 183 ....: semaphore->lock#2 FD: 5 BD: 181 +.+.: 
&bp->b_lock ->&bch->bc_lock ->&lru->node[i].lock FD: 1 BD: 168 ....: key#19 FD: 1 BD: 168 ....: key#20 FD: 1 BD: 179 ....: key#21 FD: 1 BD: 2 ....: key#22 FD: 1 BD: 415 +.+.: &mp->m_perag_lock FD: 65 BD: 178 +.+.: &ailp->ail_lock ->&ailp->ail_empty ->&p->pi_lock ->&ip->i_flags_lock ->semaphore->lock#2 FD: 383 BD: 1 +.+.: &type->s_umount_key#52/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->&c->lock ->&nilfs->ns_sem ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&____s->seqcount#2 ->&sb->map[i].swap_lock ->bit_wait_table + i ->&rq->__lock ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#36 ->&nilfs_bmap_mdt_lock_key ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&nilfs->ns_last_segment_lock ->&nilfs->ns_cptree_lock ->&nilfs->ns_segctor_sem ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&sci->sc_wait_task ->&dat_lock_key ->&dentry->d_lock FD: 66 BD: 4 ++++: &nilfs->ns_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&rq->__lock ->pool_lock#2 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->crngs.lock ->&nilfs->ns_last_segment_lock ->&fq->mq_flush_lock FD: 4 BD: 182 +.+.: &bch->bc_lock ->&lru->node[i].lock FD: 126 BD: 1 +.+.: &ip->i_lock ->&ip->i_flags_lock ->&pag->pag_ici_lock ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#35 ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount ->&mp->m_sb_lock ->semaphore->lock#2 ->key#19 ->key#20 ->key#21 ->&iip->ili_lock ->&bch->bc_lock ->&x->wait#27 ->&rq->__lock ->&bp->b_lock ->&cil->xc_ctx_lock ->&obj_hash[i].lock ->&xfs_dquot_project_class ->&sb->map[i].swap_lock ->&n->list_lock ->&dqp->q_qlock ->&xfs_dquot_group_class FD: 61 BD: 414 +.+.: &ip->i_flags_lock ->&sb->s_type->i_lock_key#35 ->&mp->m_perag_lock FD: 62 BD: 400 +.+.: &pag->pag_ici_lock ->&c->lock ->pool_lock#2 ->&ip->i_flags_lock ->&mp->m_perag_lock ->&obj_hash[i].lock FD: 59 BD: 609 +.+.: &sb->s_type->i_lock_key#35 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 1 BD: 2 +.+.: &xfs_nondir_ilock_class/6 FD: 1 BD: 2 +.+.: &xfs_nondir_ilock_class#2/7 FD: 2 BD: 168 +.+.: &mp->m_sb_lock ->key#21 FD: 1 BD: 3 ....: key#23 FD: 170 BD: 165 .+.+: sb_internal#2 ->key#21 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->batched_entropy_u32.lock ->&bch->bc_lock ->tk_core.seq.seqcount ->&x->wait#27 ->&rq->__lock ->&bp->b_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&pag->pag_state_lock ->&obj_hash[i].lock ->stock_lock ->lock ->semaphore->lock#2 ->&cil->xc_ctx_lock ->&xfs_nondir_ilock_class#3 ->&xfs_dquot_project_class ->&xfs_dir_ilock_class/5 ->&dqp->q_qlock ->&xfs_dquot_group_class ->&xfs_nondir_ilock_class#3/1 ->&xfs_dir_ilock_class ->&dqp->q_qlock/1 FD: 60 BD: 582 +.+.: &sb->s_type->i_lock_key#36 ->&dentry->d_lock ->&xa->xa_lock#9 ->bit_wait_table + i FD: 109 BD: 27 ++++: &nilfs_bmap_mdt_lock_key ->&cache->lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&mapping->i_private_lock ->&nilfs_bmap_dat_lock_key ->tk_core.seq.seqcount ->&sb->map[i].swap_lock ->bit_wait_table + i ->&rq->__lock ->&bgl->locks[i].lock#2 ->&sb->s_type->i_lock_key#36 FD: 1 BD: 29 +.+.: &cache->lock 
FD: 104 BD: 29 ++++: &nilfs_bmap_dat_lock_key ->inode_hash_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->stock_lock ->&sb->s_type->i_lock_key#36 ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&wb->list_lock ->&____s->seqcount#2 ->&c->lock ->&obj_hash[i].lock ->lock#5 FD: 1 BD: 26 +.+.: &nilfs->ns_last_segment_lock FD: 1 BD: 168 +.+.: &pag->pag_state_lock FD: 10 BD: 172 +.+.: &iip->ili_lock ->&bp->b_lock ->&obj_hash[i].lock ->pool_lock#2 ->semaphore->lock#2 FD: 71 BD: 177 ++++: &cil->xc_ctx_lock ->&ip->i_flags_lock ->&obj_hash[i].lock ->pool_lock#2 ->key#21 ->semaphore->lock#2 ->&bp->b_lock ->&cil->xc_push_lock ->&____s->seqcount ->&rq->__lock FD: 165 BD: 166 ++++: &xfs_nondir_ilock_class#3 ->&obj_hash[i].lock ->pool_lock#2 ->&bch->bc_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&x->wait#27 ->&bp->b_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->semaphore->lock#2 ->&pag->pagb_lock ->&iip->ili_lock ->&cil->xc_ctx_lock ->&qinf->qi_tree_lock ->&xfs_dquot_project_class ->key#21 ->&ip->i_flags_lock ->&pag->pag_ici_lock ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#35 ->&n->list_lock ->(console_sem).lock ->&wq->mutex ->&cil->xc_push_lock ->&x->wait#10 ->&log->l_icloglock ->semaphore->lock#3 ->&fq->mq_flush_lock ->&iclog->ic_force_wait ->&head->lock ->&pag->pag_state_lock ->&ailp->ail_lock ->&dqp->q_qlock ->&xfs_dquot_group_class ->&xfs_nondir_ilock_class#3/1 ->&mru->lock ->lock ->&mp->m_sb_lock ->key#19 ->key#20 ->&sb->map[i].swap_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 179 ....: &x->wait#28 FD: 1 BD: 4 +.+.: &nilfs->ns_cptree_lock FD: 122 BD: 24 ++++: &nilfs->ns_segctor_sem ->inode_hash_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->stock_lock ->&mi->mi_sem ->&sb->s_type->i_lock_key#36 ->&s->s_inode_list_lock ->&cache->lock ->&nilfs_bmap_mdt_lock_key ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&bgl->locks[i].lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&xa->xa_lock#9 ->&wb->list_lock ->&wb->work_lock ->&nilfs->ns_next_gen_lock ->&nilfs->ns_inode_lock ->lock#4 ->&mapping->i_private_lock ->&dat_lock_key ->&sci->sc_state_lock ->&bmap->b_sem ->lock#5 ->(&sci->sc_timer) ->&obj_hash[i].lock ->&base->lock ->&nilfs_bmap_dat_lock_key ->&sb->s_type->i_lock_key#3 ->&x->wait#29 ->&nilfs->ns_last_segment_lock ->&lruvec->lru_lock FD: 110 BD: 25 ++++: &mi->mi_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&mapping->i_private_lock ->&nilfs_bmap_mdt_lock_key ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#36 ->&wb->list_lock FD: 29 BD: 3 ....: &pctl->poll_wait ->&p->pi_lock FD: 334 BD: 1 +.+.: &type->s_umount_key#54/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&rq->__lock ->stock_lock ->&c->lock ->&nilfs_bmap_mdt_lock_key ->inode_hash_lock ->&____s->seqcount#2 ->&____s->seqcount ->&xa->xa_lock#5 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 ->namespace_sem ->&sb->s_type->i_lock_key#36 ->&type->s_vfs_rename_key ->&dentry->d_lock ->sb_writers#12 ->unnamed_dev_ida.xa_lock ->&s->s_inode_list_lock ->&n->list_lock ->remove_cache_srcu ->key#9 ->&sb->s_type->i_lock_key ->&type->s_vfs_rename_key#2 ->sb_writers#5 ->&xattrs->lock ->crngs.lock ->&type->i_mutex_dir_key#5 ->lock#10 ->key#3 ->key#14 ->key#4 
->&sb->s_type->i_lock_key#23 ->&type->s_vfs_rename_key#3 ->sb_writers#3 ->&ei->xattr_sem ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#45 ->&type->s_vfs_rename_key#4 ->sb_writers#20 ->&sb->s_type->i_lock_key#44 ->&type->s_vfs_rename_key#5 ->sb_writers#22 FD: 201 BD: 1 +.+.: (work_completion)(&pwork->work) ->fs_reclaim ->pool_lock#2 ->semaphore->lock#2 ->mmu_notifier_invalidate_range_start ->free_vmap_area_lock ->&vn->busy.lock ->&____s->seqcount ->init_mm.page_table_lock ->&rq->__lock ->&xa->xa_lock#20 ->&vbq->lock ->&bp->b_lock ->&obj_hash[i].lock ->&ip->i_flags_lock ->&qinf->qi_tree_lock ->&____s->seqcount#2 ->&c->lock ->&x->wait#28 ->&xfs_nondir_ilock_class#3 ->sb_internal#2 ->&xfs_dquot_project_class ->lock ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#35 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&pag->pag_ici_lock ->&pctl->poll_wait ->&dqp->q_qlock ->&xfs_dquot_group_class ->&n->list_lock FD: 29 BD: 26 ....: &sci->sc_wait_task ->&p->pi_lock FD: 111 BD: 25 .+.+: &dat_lock_key ->&cache->lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&mapping->i_private_lock ->&nilfs_bmap_mdt_lock_key ->tk_core.seq.seqcount ->rcu_node_0 ->&c->lock ->&____s->seqcount#2 ->&bmap->b_sem FD: 9 BD: 2 +.+.: &xa->xa_lock#20 ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 2 +.+.: &vbq->lock FD: 100 BD: 168 +.+.: &qinf->qi_tree_lock ->&xfs_dquot_project_class ->&lru->node[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->&dqp->q_qlock ->&xfs_dquot_group_class ->mmu_notifier_invalidate_range_start ->&c->lock FD: 5 BD: 168 +.+.: &pag->pagb_lock ->&obj_hash[i].lock ->pool_lock#2 ->&pag->pagb_wait FD: 96 BD: 171 +.+.: &xfs_dquot_project_class ->&lru->node[i].lock ->&x->wait#28 ->semaphore->lock#2 ->&wq->mutex ->&cil->xc_push_lock ->&x->wait#10 ->&rq->__lock ->&log->l_icloglock ->semaphore->lock#3 ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&bp->b_lock ->pool_lock#2 ->&cil->xc_ctx_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&ailp->ail_lock FD: 33 BD: 25 +.+.: &sci->sc_state_lock ->&sci->sc_wait_daemon ->&obj_hash[i].lock ->&base->lock ->&sci->sc_wait_request ->(&sci->sc_timer) ->&sci->sc_wait_task FD: 29 BD: 26 ....: &sci->sc_wait_daemon ->&p->pi_lock FD: 117 BD: 2 +.+.: (work_completion)(&sci->sc_iput_work) ->&nilfs->ns_inode_lock ->&sb->s_type->i_lock_key#36 ->&wb->list_lock ->&s->s_inode_list_lock ->bit_wait_table + i ->&rq->__lock ->sb_internal#3 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu FD: 37 BD: 179 +.+.: &cil->xc_push_lock ->&cil->xc_start_wait ->&cil->xc_commit_wait FD: 87 BD: 1 +.+.: (work_completion)(&ctx->push_work) ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&cil->xc_ctx_lock ->&cil->xc_push_lock ->&log->l_icloglock ->&ailp->ail_lock ->&c->lock FD: 69 BD: 176 +.+.: &log->l_icloglock ->&iclog->ic_write_wait ->&iclog->ic_force_wait ->&log->l_flush_wait ->&ailp->ail_lock FD: 1 BD: 180 ....: &cil->xc_start_wait FD: 29 BD: 180 ....: &cil->xc_commit_wait ->&p->pi_lock FD: 1 BD: 174 ....: semaphore->lock#3 FD: 307 BD: 7 +.+.: &sb->s_type->i_mutex_key#23 ->&ei->i_mmap_lock ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->&space_info->lock ->mmu_notifier_invalidate_range_start ->&tree->lock ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock ->&ei->lock ->&rsv->lock ->lock#4 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 
->&c->lock ->&____s->seqcount#2 ->&ei->ordered_tree_lock ->&obj_hash[i].lock ->&tree->lock#2 ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&rq->__lock ->&sem->wait_lock ->remove_cache_srcu ->rcu_node_0 ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->&folio_wait_table[i] ->sb_internal#4 ->&sb->s_type->i_mutex_key#23/4 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&root->ordered_extent_lock ->btrfs_trans_num_writers ->&base->lock ->(&timer.timer) ->&fs_info->trans_lock ->&cur_trans->writer_wait ->btrfs_trans_completed ->&fs_info->scrub_pause_wait ->&n->list_lock ->&sb->map[i].swap_lock ->btrfs-tree-00/1 FD: 82 BD: 1 +.+.: (work_completion)(&iclog->ic_end_io_work) ->&log->l_icloglock ->&ailp->ail_lock ->&ip->i_flags_lock ->&bp->b_waiters ->&bp->b_lock ->&cil->xc_push_lock ->&obj_hash[i].lock ->pool_lock#2 ->semaphore->lock#3 ->&dqp->q_pinwait ->&rq->__lock ->&pag->pagb_lock FD: 1 BD: 177 ....: &iclog->ic_write_wait FD: 29 BD: 179 ....: &ailp->ail_empty ->&p->pi_lock FD: 29 BD: 3 ....: &bp->b_waiters ->&p->pi_lock FD: 29 BD: 177 ....: &iclog->ic_force_wait ->&p->pi_lock FD: 1 BD: 177 ....: &log->l_flush_wait FD: 102 BD: 166 ++++: &xfs_dir_ilock_class ->&qinf->qi_tree_lock ->&xfs_dquot_project_class ->&iip->ili_lock ->&ip->i_flags_lock ->&ailp->ail_lock ->&bp->b_lock ->&dqp->q_qlock ->&xfs_dquot_group_class ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->semaphore->lock#2 ->&rq->__lock ->&c->lock FD: 183 BD: 9 .+.+: sb_writers#12 ->mount_lock ->&type->i_mutex_dir_key#9 ->&type->i_mutex_dir_key#9/1 ->inode_hash_lock ->fs_reclaim ->pool_lock#2 ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 ->&dentry->d_lock ->tomoyo_ss ->&obj_hash[i].lock ->(console_sem).lock ->&rq->__lock ->&s->s_inode_list_lock ->&type->s_vfs_rename_key ->&sb->s_type->i_lock_key#36 ->sb_internal#3 ->&fsnotify_mark_srcu FD: 162 BD: 12 ++++: &type->i_mutex_dir_key#9 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->&rq->__lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&dat_lock_key ->pool_lock#2 ->tk_core.seq.seqcount ->&folio_wait_table[i] ->sb_internal#3 ->namespace_sem ->&c->lock ->&n->list_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#36 FD: 108 BD: 26 ++++: &bmap->b_sem ->&cache->lock ->&nilfs_bmap_dat_lock_key ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&bgl->locks[i].lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&sb->s_type->i_lock_key#36 ->&wb->list_lock FD: 123 BD: 19 .+.+: sb_internal#3 ->&nilfs->ns_segctor_sem FD: 463 BD: 1 ++++: &type->s_umount_key#53 ->&mm->mmap_lock ->&qinf->qi_tree_lock ->&xfs_dquot_project_class ->fs_reclaim ->pool_lock#2 ->sb_internal#2 ->&obj_hash[i].lock ->&x->wait#23 ->shrinker_mutex ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#35 ->&dentry->d_lock/1 ->rcu_node_0 ->&rq->__lock ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&wq->mutex ->&cil->xc_push_lock ->&x->wait#10 ->&log->l_icloglock ->semaphore->lock#3 ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&ip->i_flags_lock ->&xfs_nondir_ilock_class#3 ->&pag->pag_ici_lock ->&zone->lock ->&____s->seqcount ->lock#4 ->lock#5 
->&lruvec->lru_lock ->(console_sem).lock ->&mru->lock ->key#21 ->&pag->pagb_wait ->(wq_completion)xfsdiscard ->&ailp->ail_lock ->&base->lock ->&xfs_dir_ilock_class ->&pag->pag_state_lock ->&mp->m_sb_lock ->key#20 ->key#19 ->key#23 ->semaphore->lock#2 ->mmu_notifier_invalidate_range_start ->batched_entropy_u32.lock ->&cil->xc_ctx_lock ->&iclog->ic_force_wait ->&lru->node[i].lock ->&bp->b_lock ->&vb->lock ->&p->pi_lock ->&x->wait ->&root->kernfs_rwsem ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&x->wait#30 ->pcpu_lock ->wq_mayday_lock ->wq_pool_mutex ->&mp->m_perag_lock ->&ht->mutex ->&pag->pag_active_wq ->&sem->wait_lock ->&x->wait#31 ->&x->wait#32 ->&k->list_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&x->wait#25 ->(&timer.timer) ->lock#2 ->&sb->s_type->i_lock_key#3 ->&bdi->wb_waitq ->quarantine_lock ->&dqp->q_qlock ->&xfs_dquot_group_class ->&cil->xc_commit_wait ->&x->wait#28 ->&c->lock FD: 1 BD: 29 +.+.: &bgl->locks[i].lock#2 FD: 1 BD: 25 +.+.: &nilfs->ns_next_gen_lock FD: 61 BD: 25 +.+.: &nilfs->ns_inode_lock ->&sb->s_type->i_lock_key#36 FD: 188 BD: 2 .+.+: sb_writers#13 ->mount_lock ->&inode->i_sb->s_type->i_mutex_dir_key ->tomoyo_ss ->&xfs_nondir_ilock_class#3 ->&sb->s_type->i_mutex_key#20 ->fs_reclaim ->&obj_hash[i].lock ->&c->lock ->&dentry->d_lock ->pool_lock#2 ->sb_internal#2 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#35 ->&rq->__lock ->&ip->i_flags_lock ->&wq->mutex FD: 168 BD: 3 ++++: &inode->i_sb->s_type->i_mutex_dir_key ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->pool_lock#2 ->&xfs_dir_ilock_class ->&obj_hash[i].lock ->&ip->i_flags_lock ->mmu_notifier_invalidate_range_start ->&pag->pag_ici_lock ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#35 ->&xfs_nondir_ilock_class#3 ->sb_internal#2 ->&xfs_dir_ilock_class/5 ->&c->lock ->&n->list_lock ->tk_core.seq.seqcount ->&qinf->qi_tree_lock ->&rq->__lock ->&x->wait#28 ->semaphore->lock#2 ->&bp->b_lock ->&dqp->q_qlock ->&____s->seqcount ->&____s->seqcount#2 ->rcu_node_0 FD: 164 BD: 11 +.+.: &type->i_mutex_dir_key#9/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->sb_internal#3 ->&type->i_mutex_dir_key#9/5 ->pool_lock#2 ->lock#4 ->&type->i_mutex_dir_key#9 ->&rq->__lock ->&c->lock ->&sb->s_type->i_mutex_key#21/4 ->&sb->s_type->i_mutex_key#22 ->&sb->s_type->i_lock_key#36 ->&fsnotify_mark_srcu ->&s->s_inode_list_lock ->inode_hash_lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#37 ->mmu_notifier_invalidate_range_start ->&n->list_lock ->smack_known_lock FD: 43 BD: 501 +.+.: &sb->s_type->i_lock_key#37 ->&dentry->d_lock FD: 165 BD: 10 +.+.: &type->s_vfs_rename_key ->&type->i_mutex_dir_key#9/1 ->&type->i_mutex_dir_key#9/5 FD: 148 BD: 12 +.+.: &type->i_mutex_dir_key#9/5 ->&sb->s_type->i_mutex_key#22 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->stock_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key#21/4 FD: 209 BD: 3 ++++: &sb->s_type->i_mutex_key#20 ->tk_core.seq.seqcount ->mapping.invalidate_lock#3 ->fs_reclaim ->pool_lock#2 ->&xfs_nondir_ilock_class#3 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&ip->i_flags_lock ->sb_internal#2 ->&sb->s_type->i_lock_key#35 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->rcu_node_0 ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&wb->list_lock ->&wb->work_lock ->&ifs->state_lock ->&sb->s_type->i_mutex_key#20/4 FD: 207 BD: 157 ++++: mapping.invalidate_lock#3 ->&xfs_nondir_ilock_class#3 ->mmu_notifier_invalidate_range_start 
->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->tk_core.seq.seqcount ->&x->wait#25 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&sb->s_type->i_lock_key#35 ->&wb->list_lock ->&wb->work_lock ->&mapping->i_mmap_rwsem ->&ip->i_flags_lock ->&____s->seqcount#2 ->&c->lock ->pool_lock#2 ->lock#5 ->rcu_node_0 ->fs_reclaim ->sb_internal#2 ->&ifs->state_lock ->mapping.invalidate_lock#3/1 FD: 35 BD: 2 ..-.: &ip->i_ioend_lock FD: 13 BD: 70 +...: &dccp_hashinfo.bhash2[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->stock_lock ->clock-AF_INET6 ->&obj_hash[i].lock FD: 172 BD: 1 +.+.: (work_completion)(&ip->i_ioend_work) ->&ip->i_ioend_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->sb_internal#2 ->&obj_hash[i].lock ->&xa->xa_lock#9 ->&xfs_nondir_ilock_class#3 ->&folio_wait_table[i] ->&rq->__lock FD: 126 BD: 13 +.+.: &sb->s_type->i_mutex_key#21/4 ->sb_internal#3 ->rename_lock FD: 124 BD: 13 +.+.: &sb->s_type->i_mutex_key#22 ->sb_internal#3 ->&dentry->d_lock ->tk_core.seq.seqcount FD: 157 BD: 166 +.+.: &xfs_dir_ilock_class/5 ->semaphore->lock#2 ->pool_lock#2 ->&pag->pag_state_lock ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->stock_lock ->&c->lock ->batched_entropy_u32.lock ->lock ->tk_core.seq.seqcount ->&rq->__lock ->&bch->bc_lock ->&x->wait#27 ->&bp->b_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 398 BD: 3 +.+.: uuid_mutex ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->inode_hash_lock ->&xa->xa_lock#5 ->stock_lock ->&sb->s_type->i_lock_key#3 ->&disk->open_mutex ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->&mapping->i_private_lock ->&c->lock ->&____s->seqcount#2 ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&fs_devs->device_list_mutex ->bdev_lock ->lock#2 ->lock#5 ->&lruvec->lru_lock ->&n->list_lock ->&fs_info->mapping_tree_lock ->&tree->lock ->krc.lock ->&folio_wait_table[i] FD: 458 BD: 1 +.+.: (wq_completion)loop0 ->(work_completion)(&worker->work) ->(work_completion)(&lo->rootcg_work) ->&rq->__lock FD: 176 BD: 173 +.+.: &fs_devs->device_list_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->(console_sem).lock ->btrfs-dev-00 ->btrfs-dev-00/1 ->&eb->refs_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&rq->__lock ->&x->wait#33 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&folio_wait_table[i] ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&sb->map[i].swap_lock ->&rcu_state.gp_wq ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&n->list_lock ->&fs_info->chunk_mutex ->&rcu_state.expedited_wq FD: 621 BD: 1 +.+.: &type->s_umount_key#55/1 ->fs_reclaim ->&c->lock ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->&x->wait#8 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->&zone->lock ->&xa->xa_lock#5 ->stock_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->inode_hash_lock ->lock#2 ->&mapping->i_private_lock ->&sb->s_type->i_lock_key#3 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&xa->xa_lock#9 ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&rq->__lock ->(console_sem).lock ->crypto_alg_sem ->&fs_info->super_lock ->wq_pool_mutex ->&fs_info->chunk_mutex ->&eb->refs_lock 
->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->rcu_node_0 ->&rcu_state.gp_wq ->bit_wait_table + i ->uuid_mutex ->btrfs-root-00 ->&fs_info->global_root_lock ->&fs_devs->device_list_mutex ->&fs_info->fs_roots_radix_lock ->&root->objectid_mutex ->btrfs-dev-00 ->btrfs-extent-00 ->&fs_info->block_group_cache_lock ->&space_info->lock ->&space_info->groups_sem ->&(&fs_info->profiles_lock)->lock ->&tree->lock ->&ctl->tree_lock ->&discard_ctl->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->unnamed_dev_ida.xa_lock ->btrfs-tree-01 ->btrfs-tree-00 ->btrfs-dreloc-00 ->&fs_info->cleanup_work_sem ->&fs_info->cleaner_mutex ->&fs_info->balance_mutex ->&fs_info->unused_bgs_lock ->&xa->xa_lock#21 ->&sb->s_type->i_lock_key#38 ->&dentry->d_lock ->&n->list_lock ->&rsv->lock ->sb_internal#4 ->&fs_info->scrub_pause_wait ->&sem->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->&x->wait#25 ->(&timer.timer) ->remove_cache_srcu ->pool_lock ->&sb->map[i].swap_lock ->semaphore->lock#4 ->&rcu_state.expedited_wq ->key ->pcpu_lock ->wq_pool_mutex.wait_lock FD: 59 BD: 617 +.+.: &sb->s_type->i_lock_key#38 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 1 BD: 1 ....: printk_limits[6].lock FD: 327 BD: 2 .+.+: sb_writers#14 ->mount_lock ->&ovl_i_mutex_dir_key[depth]/1 ->&ovl_i_mutex_dir_key[depth]#2 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#37 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu ->&ovl_i_mutex_key[depth] FD: 315 BD: 3 +.+.: &ovl_i_mutex_dir_key[depth]/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&____s->seqcount#2 ->&rq->__lock ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->&type->i_mutex_dir_key#9 ->pool_lock#2 ->&obj_hash[i].lock ->tomoyo_ss ->sb_writers#12 ->inode_hash_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 ->smack_known_lock ->&ovl_i_mutex_key[depth] ->&type->i_mutex_dir_key#5 ->&xattrs->lock ->&ovl_i_lock_key[depth] ->sb_writers#5 ->rename_lock ->krc.lock ->&n->list_lock ->&ovl_i_mutex_dir_key[depth]#2 ->&fsnotify_mark_srcu ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&type->i_mutex_dir_key#15 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&type->i_mutex_dir_key#3 ->&ei->xattr_sem ->sb_writers#3 ->&sb->s_type->i_mutex_key#27 ->sb_writers#22 ->&ovl_i_mutex_key[depth]/4 ->&u->bindlock FD: 37 BD: 16 +.+.: smack_known_lock ->&obj_hash[i].lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock FD: 46 BD: 418 +.+.: &fs_info->super_lock ->(console_sem).lock FD: 298 BD: 4 +.+.: &ovl_i_mutex_key[depth] ->&dentry->d_lock ->tomoyo_ss ->&ovl_i_lock_key[depth] ->sb_writers#12 ->&sb->s_type->i_lock_key#37 ->&sb->s_type->i_lock_key#23 ->&ei->xattr_sem ->sb_writers#3 ->&rq->__lock ->tk_core.seq.seqcount FD: 327 BD: 7 +.+.: &ovl_i_lock_key[depth] ->mount_lock ->fs_reclaim ->pool_lock#2 ->sb_writers#12 ->stock_lock ->&obj_hash[i].lock ->&c->lock ->lock#10 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->sb_writers#5 ->sb_writers#20 ->sb_writers#3 ->sb_writers#22 ->&sb->s_type->i_lock_key#23 ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#8 ->mapping.invalidate_lock ->&____s->seqcount ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&wb->list_lock ->&fq->mq_flush_lock ->&x->wait#25 FD: 1 BD: 161 ....: &ifs->state_lock FD: 1 BD: 2 ....: &dqp->q_pinwait FD: 29 BD: 85 ....: &sk->sk_lock.wq ->&p->pi_lock FD: 14 BD: 399 +.+.: &mru->lock ->pool_lock#2 
->&obj_hash[i].lock ->&base->lock FD: 164 BD: 174 +.+.: btrfs-root-01#2/1 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&xa->xa_lock#9 ->btrfs-root-00 ->btrfs-root-00/1 ->&fs_info->ref_verify_lock ->&c->lock ->lock#4 ->&sem->wait_lock ->&p->pi_lock ->key#25 ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 169 ....: &pag->pagb_wait FD: 1 BD: 2 +.+.: (wq_completion)xfsdiscard FD: 162 BD: 176 +.+.: &fs_info->chunk_mutex ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&fs_info->mapping_tree_lock ->&eb->refs_lock ->&obj_hash[i].lock ->&space_info->lock ->&rsv->lock ->&tree->lock ->&ctl->tree_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&fs_info->global_root_lock ->&(&fs_info->profiles_lock)->lock ->btrfs-chunk-00 ->btrfs-chunk-00/1 ->&rq->__lock FD: 89 BD: 200 ++++: &fs_info->mapping_tree_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&tree->lock ->&obj_hash[i].lock FD: 88 BD: 240 +.+.: &tree->lock ->&obj_hash[i].lock ->pool_lock#2 ->&state->wq ->&ei->lock ->&root->delalloc_lock ->&rsv->lock ->&root->qgroup_meta_rsv_lock ->&fs_info->qgroup_lock ->&space_info->lock ->&c->lock ->&____s->seqcount ->key#26 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 832 +.+.: &eb->refs_lock FD: 10 BD: 407 +.+.: &fs_info->buffer_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 1 BD: 183 ++++: &fs_info->dev_replace.rwsem FD: 29 BD: 4 ....: &cprc->ckpt_wait_queue ->&p->pi_lock FD: 99 BD: 1 +.+.: (work_completion)(&bbio->end_io_work) ->bit_wait_table + i ->&eb->refs_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&folio_wait_table[i] ->mmu_notifier_invalidate_range_start ->&tree->lock ->(console_sem).lock FD: 162 BD: 176 ++++: btrfs-root-00 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-root-00/1 ->key#25 ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&cluster->lock FD: 1 BD: 200 ++++: &fs_info->global_root_lock FD: 48 BD: 401 +.+.: &fs_info->fs_roots_radix_lock ->pool_lock#2 ->&fs_info->qgroup_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 187 BD: 167 +.+.: &root->objectid_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-dreloc-00 ->&obj_hash[i].lock ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&c->lock FD: 138 BD: 184 ++++: btrfs-dreloc-00 ->&rsv->lock ->&space_info->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-dreloc-00/1 ->key#25 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&c->lock ->&sb->s_type->i_lock_key#38 ->&ei->lock FD: 140 BD: 174 ++++: btrfs-dev-00 ->&fs_info->mapping_tree_lock ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-dev-00/1 ->key#25 ->&c->lock 
->&cluster->lock ->rcu_node_0 FD: 169 BD: 191 ++++: btrfs-extent-00 ->&fs_info->mapping_tree_lock ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-extent-00/1 ->key#25 ->&fs_info->trans_lock ->&cur_trans->delayed_refs.lock ->&fs_info->ref_verify_lock ->&c->lock ->&n->list_lock ->&fs_info->global_root_lock ->btrfs-free-space-00 ->&obj_hash[i].lock ->&space_info->lock ->&rq->__lock ->&____s->seqcount#2 ->btrfs-extent-01/7 ->btrfs-extent-00/2 ->btrfs-extent-00/6 ->btrfs-extent-00/3 ->&cluster->lock FD: 5 BD: 222 ++++: &fs_info->block_group_cache_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cache->lock#2 FD: 42 BD: 244 +.+.: &space_info->lock ->&rsv->lock ->&cache->lock#2 ->key#27 ->key#26 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&ticket.wait FD: 58 BD: 218 ++++: &space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&cache->lock#2 ->&fs_info->block_group_cache_lock ->&ctl->tree_lock ->&space_info->lock ->&c->lock ->&n->list_lock ->&cache->data_rwsem ->&rq->__lock ->&cluster->refill_lock ->&caching_ctl->wait ->&fs_info->relocation_bg_lock FD: 2 BD: 177 +.+.: &(&fs_info->profiles_lock)->lock ->&____s->seqcount#12 FD: 1 BD: 246 +.+.: &____s->seqcount#12 FD: 5 BD: 224 +.+.: &ctl->tree_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cluster->lock FD: 15 BD: 169 +.+.: &discard_ctl->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 245 +.+.: &fs_info->balance_lock FD: 1 BD: 248 +.+.: &rsv->lock FD: 308 BD: 1 +.+.: &type->s_umount_key#56 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#37 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#36 ->sb_internal#3 ->&rq->__lock ->&dentry->d_lock/1 ->&type->s_umount_key#57 ->stock_lock ->rcu_node_0 ->mount_lock ->unnamed_dev_ida.xa_lock ->&lru->node[i].lock ->&type->s_umount_key#69 ->&sb->s_type->i_lock_key ->&rnp->exp_wq[3] ->&type->s_umount_key#32 ->&sb->s_type->i_lock_key#23 ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&type->s_umount_key#75 ->&sb->s_type->i_lock_key#45 ->&wb->list_lock ->sb_internal ->krc.lock ->&type->s_umount_key#73 ->&sb->s_type->i_lock_key#44 ->&rnp->exp_lock FD: 162 BD: 2 ++++: &type->s_umount_key#57 ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&sb->s_type->i_lock_key#36 ->&xa->xa_lock#9 ->lock#4 ->lock#5 ->&wb->list_lock ->&nilfs->ns_sem ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&sci->sc_state_lock ->&sci->sc_wait_daemon ->&sci->sc_wait_request ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&x->wait#25 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->&bdi->wb_waitq ->&s->s_inode_list_lock ->&mapping->i_private_lock ->stock_lock ->&lruvec->lru_lock ->&bmap->b_sem ->inode_hash_lock ->&fsnotify_mark_srcu ->&nilfs->ns_segctor_sem ->&sci->sc_wait_task ->&nilfs_bmap_mdt_lock_key ->&nilfs->ns_inode_lock ->&nilfs->ns_cptree_lock ->&root->kernfs_rwsem ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&k->list_lock ->&x->wait#34 ->&cache->lock ->&____s->seqcount ->&nilfs_bmap_dat_lock_key FD: 187 BD: 180 ++++: btrfs-tree-01 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 
->&mapping->i_private_lock ->lock ->&eb->refs_lock ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->btrfs-tree-00 ->rcu_node_0 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->btrfs-tree-01/1 ->key#25 ->&____s->seqcount#2 ->&c->lock ->btrfs-tree-00/1 ->&sem->wait_lock ->btrfs-tree-00/6 ->&swapped_blocks->lock ->btrfs-treloc-01 FD: 31 BD: 1 +.+.: &fs_info->transaction_kthread_mutex ->&fs_info->trans_lock ->&p->pi_lock ->&rq->__lock FD: 5 BD: 211 +.+.: &fs_info->trans_lock ->&fs_info->transaction_blocked_wait ->&obj_hash[i].lock ->pool_lock#2 FD: 179 BD: 182 ++++: btrfs-tree-00 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-tree-00/1 ->key#25 ->pool_lock#2 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#38 ->&obj_hash[i].lock ->&cur_trans->delayed_refs.lock ->&fs_info->global_root_lock ->&fs_info->fs_roots_radix_lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&fs_info->ref_verify_lock ->btrfs-tree-00/2 ->btrfs-tree-00/4 ->btrfs-tree-00/6 ->&rq->__lock ->&cluster->lock ->&space_info->lock ->remove_cache_srcu ->&wsm.lock ->&wsm->ws_lock ->btrfs-extent-00 FD: 163 BD: 3 ++++: &fs_info->cleanup_work_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-root-00 ->&obj_hash[i].lock ->&c->lock FD: 312 BD: 6 +.+.: &fs_info->cleaner_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-root-00 ->&obj_hash[i].lock ->&fs_info->delayed_iput_lock ->&fs_info->trans_lock ->&root->kernfs_rwsem ->fs_reclaim ->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&sb->s_type->i_lock_key#38 ->&fs_info->delayed_iputs_wait ->&rq->__lock ->&rsv->lock ->&space_info->lock ->&fs_info->reloc_mutex ->sb_internal#4 ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait ->btrfs_trans_num_writers ->&sb->s_type->i_mutex_key#23 ->&tree->lock ->&tree->lock#2 ->mapping.invalidate_lock#4 ->&folio_wait_table[i] ->&ei->lock ->&xa->xa_lock#9 ->&wb->list_lock ->&fs_info->fs_roots_radix_lock ->&xa->xa_lock#21 FD: 442 BD: 5 +.+.: &fs_info->balance_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount#12 ->mmu_notifier_invalidate_range_start ->&rsv->lock ->&space_info->lock ->sb_internal#4 ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait ->&obj_hash[i].lock ->&fs_info->balance_lock ->(console_sem).lock ->rcu_node_0 ->&rq->__lock ->&fs_info->super_lock ->&root->kernfs_rwsem ->kernfs_notify_lock ->&fs_info->balance_wait_q ->&mm->mmap_lock FD: 1 BD: 169 +.+.: &fs_info->unused_bgs_lock FD: 1 BD: 13 ....: &fs_info->delayed_iput_lock FD: 29 BD: 172 ....: &fs_info->transaction_wait ->&p->pi_lock FD: 314 BD: 3 +.+.: &fs_info->reclaim_bgs_lock ->&fs_info->unused_bgs_lock ->&discard_ctl->lock ->&space_info->groups_sem ->btrfs-chunk-00 ->&fs_info->block_group_cache_lock ->&space_info->lock ->&fs_info->scrub_lock ->&fs_info->global_root_lock ->&fs_info->swapfile_pins_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->sb_internal#4 ->btrfs_trans_num_writers ->&obj_hash[i].lock ->&cache->lock#2 ->btrfs-root-01#2 ->btrfs-root-00 ->&eb->refs_lock ->&rsv->lock ->(console_sem).lock ->&rq->__lock ->&fs_info->ordered_operations_mutex ->&fs_info->cleaner_mutex ->&sb->s_type->i_lock_key#38 ->&s->s_inode_list_lock ->&tree->lock#2 ->&tree->lock 
->&xa->xa_lock#21 ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu ->&ei->ordered_tree_lock ->&fs_info->scrub_pause_wait ->&fs_info->mapping_tree_lock ->&c->lock ->lock#4 ->lock#5 ->btrfs-dreloc-00 ->btrfs-extent-00 ->btrfs-csum-00 ->&ei->lock ->&root->ordered_extent_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->&folio_wait_table[i] ->btrfs_ordered_extent ->&entry->wait ->&lruvec->lru_lock ->&cfs_rq->removed.lock ->&wb->list_lock ->&n->list_lock FD: 29 BD: 26 ....: &sci->sc_wait_request ->&p->pi_lock FD: 1 BD: 26 ....: (&sci->sc_timer) FD: 60 BD: 167 +.+.: &xa->xa_lock#21 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&sb->s_type->i_lock_key#38 FD: 1 BD: 3 +.+.: &vb->lock FD: 624 BD: 1 ++++: &type->s_umount_key#58 ->&obj_hash[i].lock ->pool_lock#2 ->&x->wait#23 ->shrinker_mutex ->rename_lock.seqcount ->&dentry->d_lock ->&rq->__lock ->&sb->s_type->i_lock_key#38 ->&dentry->d_lock/1 ->rcu_node_0 ->&rcu_state.gp_wq ->&mapping->i_private_lock ->btrfs-tree-01 ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->&wq->list_lock ->&eb->refs_lock ->btrfs-tree-00 ->&c->lock ->btrfs-csum-00 ->lock#4 ->lock#5 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&fs_info->ordered_operations_mutex ->&fs_info->trans_lock ->btrfs_trans_num_writers ->btrfs_trans_completed ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait ->&s->s_inode_list_lock ->&tree->lock#2 ->&tree->lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&ei->ordered_tree_lock ->&xa->xa_lock#21 ->&____s->seqcount#2 ->&____s->seqcount ->&lruvec->lru_lock ->&delayed_node->mutex ->(console_sem).lock ->&p->pi_lock ->&x->wait ->&fs_info->qgroup_rescan_lock ->semaphore->lock#4 ->&fs_info->balance_mutex ->&fs_info->dev_replace.lock_finishing_cancel_unmount ->&fs_info->scrub_lock ->&fs_info->defrag_inodes_lock ->&wq->mutex ->&fs_info->delayed_iput_lock ->&base->lock ->&discard_ctl->lock ->&fs_info->unused_bgs_lock ->&fs_info->cleaner_mutex ->&fs_info->cleanup_work_sem ->&root->kernfs_rwsem ->sysfs_symlink_target_lock ->kernfs_idr_lock ->key#26 ->key#27 ->&x->wait#36 ->&k->list_lock ->&fs_info->block_group_cache_lock ->&cache->lock#2 ->&fs_info->buffer_lock ->wq_mayday_lock ->wq_pool_mutex ->&cfs_rq->removed.lock ->&fs_info->fs_roots_radix_lock ->&fs_info->zone_active_bgs_lock ->&space_info->groups_sem ->&ctl->tree_lock ->&rsv->lock ->&space_info->lock ->&wb->list_lock ->uuid_mutex ->&bdi->wb_waitq ->&sem->wait_lock ->remove_cache_srcu ->&folio_wait_table[i] ->key ->pcpu_lock ->percpu_counters_lock ->hrtimer_bases.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&meta->lock ->&rcu_state.expedited_wq FD: 275 BD: 3 ++++: &type->i_mutex_dir_key#10 ->&xa->xa_lock#21 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&obj_hash[i].lock ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->&____s->seqcount ->inode_hash_lock ->&sb->s_type->i_lock_key#38 ->&c->lock ->&____s->seqcount#2 ->&s->s_inode_list_lock ->&space_info->lock ->&rsv->lock ->sb_internal#4 ->btrfs_trans_num_writers ->&rq->__lock ->&n->list_lock FD: 29 BD: 25 ..-.: &x->wait#29 ->&p->pi_lock FD: 1 BD: 2 ....: &x->wait#30 FD: 1 BD: 2 ....: &pag->pag_active_wq FD: 1 BD: 2 ....: &x->wait#31 FD: 1 BD: 2 ....: &x->wait#32 FD: 139 BD: 177 ++++: btrfs-csum-00 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem 
->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-csum-00/1 ->key#25 ->&sem->wait_lock ->&rq->__lock ->stock_lock ->&space_info->lock FD: 76 BD: 4 +.+.: devpts_mutex ->&xa->xa_lock#23 ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->cdev_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 190 ....: key#24 FD: 1 BD: 1 ....: (&bdi->laptop_mode_wb_timer) FD: 461 BD: 2 .+.+: sb_writers#15 ->mount_lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&fs_info->subvol_sem ->&obj_hash[i].lock ->&type->i_mutex_dir_key#10 ->&sb->s_type->i_mutex_key#23 ->&ei->lock ->&sem->wait_lock ->&p->pi_lock ->&tree->lock#2 ->&tree->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&sb->s_type->i_lock_key#38 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->rcu_node_0 ->&rq->__lock ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&fs_info->global_root_lock ->btrfs-extent-00 ->btrfs-csum-00 ->&fs_info->block_group_cache_lock ->&cache->lock#2 ->&ei->ordered_tree_lock ->&root->ordered_extent_lock ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->sb_internal#4 ->btrfs_trans_num_writers ->&____s->seqcount#12 ->&fs_info->balance_lock ->&space_info->groups_sem ->&n->list_lock ->&fs_info->defrag_inodes_lock ->fsverity_hash_alg_init_mutex ->mapping.invalidate_lock#4 ->&wsm->ws_lock ->&wq->list_lock ->&folio_wait_table[i] ->&fs_info->super_lock ->&fs_info->balance_mutex ->&fs_info->reclaim_bgs_lock ->btrfs-chunk-00 ->&type->i_mutex_dir_key#10/1 FD: 302 BD: 3 +.+.: &fs_info->subvol_sem ->&fs_info->qgroup_ioctl_lock ->&space_info->lock ->&rsv->lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->sb_internal#4 ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait ->&obj_hash[i].lock ->&root->root_item_lock FD: 208 BD: 167 +.+.: &fs_info->qgroup_ioctl_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->mmu_notifier_invalidate_range_start ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&eb->refs_lock ->btrfs-quota-00 ->btrfs-root-00 ->btrfs-root-00/1 ->btrfs-root-01/7 ->btrfs-root-00/6 ->&obj_hash[i].lock ->btrfs-root-01#2 ->&c->lock ->&____s->seqcount#2 ->&fs_info->qgroup_lock ->&fs_info->qgroup_rescan_lock ->&rq->__lock FD: 300 BD: 164 .+.+: sb_internal#4 ->&fs_info->trans_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs_trans_num_writers ->btrfs_trans_completed ->&obj_hash[i].lock ->btrfs_trans_unblocked ->&fs_info->transaction_wait ->&rq->__lock ->&c->lock ->&n->list_lock FD: 299 BD: 165 ++++: btrfs_trans_num_writers ->btrfs_trans_num_extwriters ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&delayed_root->lock ->&obj_hash[i].lock ->btrfs_trans_pending_ordered ->&fs_info->scrub_lock ->&fs_info->trans_lock ->&c->lock ->&fs_info->ordered_operations_mutex ->btrfs-tree-00/1 ->btrfs-tree-00 FD: 293 BD: 166 ++++: btrfs_trans_num_extwriters ->&fs_info->trans_lock ->&rsv->lock ->&fs_info->qgroup_ioctl_lock ->btrfs_trans_commit_prep ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&delayed_root->lock ->&obj_hash[i].lock ->&fs_info->reloc_mutex ->&root->root_item_lock ->&xa->xa_lock#21 
->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&delayed_node->mutex ->&ei->lock ->&tree->lock#2 ->btrfs-tree-01 ->btrfs-tree-01/1 ->btrfs-tree-00/1 ->&eb->refs_lock ->&tree->lock ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock ->btrfs-tree-00 ->&cur_trans->delayed_refs.lock ->&fs_info->block_group_cache_lock ->&space_info->lock ->&rq->__lock ->&cache->lock#2 ->&fs_info->global_root_lock ->btrfs-csum-00 ->btrfs-csum-00/1 ->btrfs-free-space-00 ->btrfs-free-space-00/1 ->&cache->free_space_lock ->btrfs-extent-00 ->&root->objectid_mutex ->inode_hash_lock ->&sb->s_type->i_lock_key#38 ->btrfs-extent-00/1 ->&cur_trans->dirty_bgs_lock ->&fs_info->unused_bgs_lock ->&fs_info->ref_verify_lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->chunk_mutex ->&fs_info->mapping_tree_lock ->&fs_devs->device_list_mutex ->&root->log_mutex ->&ei->log_mutex ->&fs_info->tree_log_mutex ->&fs_info->qgroup_rescan_lock ->&fs_info->commit_root_sem ->&n->list_lock ->&cur_trans->writer_wait ->btrfs-tree-00/6 ->remove_cache_srcu ->&fs_info->super_lock ->btrfs-root-00 ->btrfs-uuid-00 ->btrfs-root-00/1 ->btrfs-root-01#2 ->btrfs-root-01#2/1 ->btrfs_trans_completed ->&cur_trans->commit_wait ->&fs_info->ro_block_group_mutex ->btrfs-dreloc-00 ->btrfs-dreloc-00/1 ->fs_reclaim ->stock_lock ->&cluster->refill_lock ->&fs_info->relocation_bg_lock ->&cur_trans->cache_write_mutex ->&space_info->groups_sem ->&ctl->tree_lock ->&fs_info->fs_roots_radix_lock ->lock#4 ->btrfs-uuid-00/1 ->btrfs-dev-00 ->btrfs-dev-00/1 ->&rc->reloc_root_tree.lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->btrfs-treloc-01 ->&xa->xa_lock#9 ->lock FD: 4 BD: 246 +.+.: &cache->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 162 BD: 176 ++++: btrfs-quota-00 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock ->&eb->refs_lock ->key#25 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&rq->__lock ->stock_lock ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-quota-00/1 ->&n->list_lock ->&fs_info->ref_verify_lock ->&sem->wait_lock FD: 8 BD: 227 +.+.: &cur_trans->delayed_refs.lock ->&head_ref->lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 6 BD: 228 +.+.: &head_ref->lock ->&fs_info->tree_mod_log_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rsv->lock FD: 161 BD: 177 +.+.: btrfs-root-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&rq->__lock ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&xa->xa_lock#9 ->key#25 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&____s->seqcount ->stock_lock ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-root-01/7 ->&c->lock ->&____s->seqcount#2 ->&fs_info->buffer_lock ->&obj_hash[i].lock ->&fs_info->ref_verify_lock ->remove_cache_srcu ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock FD: 229 BD: 1 +.+.: &type->s_umount_key#62/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->&fc->lock ->&obj_hash[i].lock ->percpu_counters_lock ->&x->wait#8 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&rq->__lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 
->bdi_lock ->inode_hash_lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&xa->xa_lock#5 ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#40 ->&fi->lock ->fuse_mutex ->&fc->bg_lock ->&dentry->d_lock ->&sem->wait_lock ->&p->pi_lock FD: 309 BD: 1 +.+.: (work_completion)(&work->normal_work) ->&wq->thres_lock ->&caching_ctl->mutex ->&caching_ctl->wait ->btrfs_ordered_extent ->&obj_hash[i].lock ->pool_lock#2 ->&wq->list_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&ei->ordered_tree_lock ->&blkg->async_bio_lock ->&rq->__lock ->&fs_info->delayed_iput_lock ->&p->pi_lock ->sb_internal#4 ->&fs_info->scrub_pause_wait ->&fs_info->qgroup_rescan_lock ->&space_info->lock ->&rsv->lock ->btrfs_trans_num_writers ->(console_sem).lock ->rcu_node_0 ->&sb->map[i].swap_lock ->&wsm->ws_lock ->&____s->seqcount ->&wsm.lock ->&compr_pool.lock ->&tree->lock ->remove_cache_srcu ->&xa->xa_lock#9 ->&folio_wait_table[i] ->lock#4 ->lock#5 FD: 1 BD: 2 +.+.: &wq->thres_lock FD: 108 BD: 2 +.+.: &caching_ctl->mutex ->&fs_info->commit_root_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&tree->lock ->&obj_hash[i].lock FD: 107 BD: 172 ++++: &fs_info->commit_root_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&fs_info->global_root_lock ->&tree->lock ->&ctl->tree_lock ->&obj_hash[i].lock ->&cache->lock#2 ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&swapped_blocks->lock ->&cur_trans->dropped_roots_lock ->&caching_ctl->wait ->lock#4 ->&fs_info->fs_roots_radix_lock ->&n->list_lock FD: 1 BD: 229 ....: key#25 FD: 29 BD: 221 ....: &caching_ctl->wait ->&p->pi_lock FD: 138 BD: 178 +.+.: btrfs-root-01/7 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->&eb->refs_lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&____s->seqcount ->stock_lock ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-root-00/6 ->&c->lock ->&fs_info->ref_verify_lock FD: 1 BD: 209 +.+.: &root->accounting_lock FD: 103 BD: 179 +.+.: btrfs-root-00/6 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->key#25 ->&eb->refs_lock ->&obj_hash[i].lock ->&c->lock ->&fs_info->ref_verify_lock ->&____s->seqcount FD: 165 BD: 173 ++++: btrfs-root-01#2 ->btrfs-root-00 ->lock#4 ->&eb->refs_lock ->key#25 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&mapping->i_private_lock ->lock ->btrfs-root-01#2/1 ->btrfs-root-00/1 ->&rq->__lock ->&sem->wait_lock FD: 184 BD: 167 .+.+: btrfs_trans_commit_prep ->&rsv->lock ->&cur_trans->delayed_refs.lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&cur_trans->dirty_bgs_lock ->&fs_info->ro_block_group_mutex ->&cur_trans->cache_write_mutex ->&____s->seqcount ->&c->lock ->&____s->seqcount#2 ->&fs_info->trans_lock FD: 178 BD: 1 +.+.: &head_ref->mutex ->&head_ref->lock ->&fs_info->global_root_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-extent-00 ->btrfs-extent-00/1 ->&eb->refs_lock ->&fs_info->block_group_cache_lock ->&cache->free_space_lock 
->&obj_hash[i].lock ->&fs_info->delalloc_root_lock ->&space_info->lock ->&tree->lock ->&cur_trans->dirty_bgs_lock ->&rsv->lock ->&cur_trans->delayed_refs.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->&fs_info->qgroup_lock ->&rq->__lock ->&fs_info->unused_bgs_lock ->&n->list_lock ->btrfs-extent-01/7 ->btrfs-extent-00/6 ->btrfs-extent-01#2 ->btrfs-extent-01#2/1 ->&cache->lock#2 ->&caching_ctl->wait ->btrfs-csum-00 ->btrfs-csum-00/1 FD: 1 BD: 229 .+.+: &fs_info->tree_mod_log_lock FD: 129 BD: 192 +.+.: btrfs-extent-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock ->&rq->__lock ->&fs_info->ref_verify_lock ->key#25 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&____s->seqcount#2 ->&n->list_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&cache->lock#2 ->&fs_info->mapping_tree_lock FD: 170 BD: 173 +.+.: &cache->free_space_lock ->&fs_info->global_root_lock ->btrfs-free-space-00 ->btrfs-free-space-00/1 ->&eb->refs_lock ->rcu_node_0 ->&rq->__lock ->btrfs-extent-00 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock FD: 160 BD: 192 ++++: btrfs-free-space-00 ->&rq->__lock ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-free-space-00/1 ->key#25 ->&space_info->lock ->&cluster->lock FD: 128 BD: 193 +.+.: btrfs-free-space-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&obj_hash[i].lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->key#25 ->&c->lock ->&fs_info->ref_verify_lock ->&____s->seqcount#2 ->&____s->seqcount ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock ->&rq->__lock ->&n->list_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock FD: 1 BD: 242 +.+.: &fs_info->delalloc_root_lock FD: 1 BD: 174 +.+.: &cur_trans->dirty_bgs_lock FD: 163 BD: 168 +.+.: &fs_info->ro_block_group_mutex ->&____s->seqcount#12 ->&fs_info->balance_lock ->&space_info->lock ->&fs_info->chunk_mutex FD: 173 BD: 168 +.+.: &cur_trans->cache_write_mutex ->&cur_trans->dirty_bgs_lock ->&fs_info->global_root_lock ->&cache->lock#2 ->btrfs-extent-00 ->&eb->refs_lock ->&rsv->lock ->&space_info->lock ->btrfs-extent-01#2 FD: 1 BD: 212 ....: &fs_info->transaction_blocked_wait FD: 1 BD: 172 +.+.: &delayed_root->lock FD: 87 BD: 166 ++++: btrfs_trans_pending_ordered ->&ei->lock ->&rsv->lock ->&ei->ordered_tree_lock ->&root->qgroup_meta_rsv_lock ->&fs_info->qgroup_lock ->key#27 ->&space_info->lock ->&fs_info->trans_lock ->&cur_trans->pending_wait FD: 1 BD: 166 +.+.: &fs_info->scrub_lock FD: 244 BD: 167 ++++: btrfs_trans_completed ->btrfs_trans_super_committed ->&fs_info->unused_bg_unpin_mutex ->&obj_hash[i].lock ->pool_lock#2 ->tk_core.seq.seqcount ->&discard_ctl->lock ->&cur_trans->commit_wait ->&rq->__lock ->quarantine_lock FD: 241 BD: 168 .+.+: btrfs_trans_super_committed ->btrfs_trans_unblocked ->&fs_info->tree_log_mutex ->&cur_trans->commit_wait ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 239 BD: 169 ++++: btrfs_trans_unblocked ->&fs_info->reloc_mutex ->&fs_info->tree_log_mutex ->&rq->__lock FD: 238 
BD: 170 +.+.: &fs_info->reloc_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&delayed_root->lock ->&obj_hash[i].lock ->&cur_trans->delayed_refs.lock ->&fs_info->fs_roots_radix_lock ->btrfs-root-01#2 ->&eb->refs_lock ->&fs_devs->device_list_mutex ->&fs_info->dev_replace.rwsem ->btrfs-root-00 ->&fs_info->global_root_lock ->btrfs-extent-00 ->&fs_info->block_group_cache_lock ->&space_info->lock ->&tree->lock ->&cur_trans->dirty_bgs_lock ->&rsv->lock ->&cache->lock#2 ->&fs_info->commit_root_sem ->&fs_info->tree_log_mutex ->&rq->__lock ->&fs_info->qgroup_lock ->btrfs-root-01#2/1 ->btrfs-root-00/1 ->btrfs-quota-00 ->btrfs-quota-00/1 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&fs_info->chunk_mutex ->btrfs-log-00 ->&fs_info->qgroup_rescan_lock ->lock#4 ->&n->list_lock ->btrfs-extent-01#2 ->btrfs-log-01#2 ->rcu_node_0 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&space_info->groups_sem ->stock_lock ->&xa->xa_lock#9 ->&mapping->i_private_lock ->lock ->btrfs-tree-01/7 ->&rc->reloc_root_tree.lock FD: 104 BD: 175 +.+.: btrfs-dev-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&obj_hash[i].lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->&c->lock ->&n->list_lock ->&fs_info->ref_verify_lock ->key#25 FD: 4 BD: 186 +.+.: &swapped_blocks->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 173 +.+.: &cur_trans->dropped_roots_lock FD: 222 BD: 171 +.+.: &fs_info->tree_log_mutex ->&fs_info->trans_lock ->&fs_info->transaction_wait ->&tree->lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#38 ->&xa->xa_lock#9 ->&mapping->i_private_lock ->btrfs-extent-00 ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->&base->lock ->btrfs-free-space-00 ->btrfs-dev-00 ->lock#4 ->lock#5 ->btrfs-quota-00 ->btrfs-root-00 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&eb->refs_lock ->btrfs-root-01#2 ->&folio_wait_table[i] ->&rq->__lock ->&fs_info->global_root_lock ->&fs_devs->device_list_mutex ->btrfs-tree-01 ->btrfs-tree-00 ->rcu_node_0 ->btrfs-chunk-00 ->remove_cache_srcu ->&lock->wait_lock ->btrfs-extent-01#2 ->&p->pi_lock ->&n->list_lock ->&sb->map[i].swap_lock ->&wb->list_lock ->btrfs-dreloc-00 ->&rcu_state.expedited_wq ->btrfs-uuid-00 ->btrfs-csum-00 ->btrfs-treloc-01 ->key#36 ->&rcu_state.gp_wq FD: 29 BD: 248 ..-.: &x->wait#33 ->&p->pi_lock FD: 1 BD: 3 ....: &x->wait#34 FD: 35 BD: 1 ..-.: drivers/regulator/core.c:6379 FD: 94 BD: 1 +.-.: (&ndev->rs_timer) ->&ndev->lock ->&c->lock ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 29 BD: 169 ....: &cur_trans->commit_wait ->&p->pi_lock FD: 106 BD: 168 +.+.: &fs_info->unused_bg_unpin_mutex ->&tree->lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&fs_info->block_group_cache_lock ->&ctl->tree_lock ->tk_core.seq.seqcount ->&discard_ctl->lock ->&space_info->lock ->&fs_info->mapping_tree_lock ->&x->wait#25 ->&__ctx->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&base->lock ->(&timer.timer) ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&sb->map[i].swap_lock ->remove_cache_srcu FD: 1 BD: 14 ....: &fs_info->scrub_pause_wait FD: 47 BD: 417 +.+.: &fs_info->qgroup_lock ->&fs_info->super_lock FD: 4 BD: 2 +.+.: (regulator_init_complete_work).work ->&k->list_lock ->&k->k_lock FD: 304 BD: 9 +.+.: 
&ei->i_mmap_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->sb_internal#4 ->btrfs_trans_num_writers ->&space_info->lock ->fs_reclaim ->&tree->lock ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock ->&ei->lock ->&rsv->lock ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&ei->ordered_tree_lock ->&tree->lock#2 ->&____s->seqcount#2 ->&c->lock ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&rq->__lock ->lock#5 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&fs_info->global_root_lock ->btrfs-extent-00 ->btrfs-csum-00 ->&cache->lock#2 ->&root->ordered_extent_lock ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->rcu_node_0 ->&folio_wait_table[i] ->btrfs_ordered_extent ->&entry->wait ->remove_cache_srcu ->&ei->i_mmap_lock/1 ->&rcu_state.expedited_wq FD: 1 BD: 167 +.+.: &root->root_item_lock FD: 189 BD: 167 +.+.: &delayed_node->mutex ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock ->&space_info->lock ->&rsv->lock ->&sb->s_type->i_lock_key#38 ->&delayed_root->lock ->btrfs-tree-01 ->&eb->refs_lock ->btrfs-tree-01/1 ->btrfs-tree-00/1 ->btrfs-tree-00 ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 242 +.+.: &root->qgroup_meta_rsv_lock FD: 61 BD: 242 +.+.: &ei->lock ->&rsv->lock ->&sb->s_type->i_lock_key#38 FD: 1 BD: 174 ..-.: &ei->ordered_tree_lock FD: 90 BD: 173 ++++: &tree->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&tree->lock ->key#28 FD: 1 BD: 241 ....: &state->wq FD: 61 BD: 241 +.+.: &root->delalloc_lock ->&fs_info->delalloc_root_lock ->&sb->s_type->i_lock_key#38 FD: 184 BD: 181 +.+.: btrfs-tree-01/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&xa->xa_lock#9 ->lock#4 ->btrfs-tree-00 ->btrfs-tree-00/1 ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&obj_hash[i].lock ->&fs_info->block_group_cache_lock ->&space_info->lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock ->&rq->__lock ->&fs_info->ref_verify_lock ->key#25 ->&fs_info->global_root_lock ->&fs_info->fs_roots_radix_lock ->&sem->wait_lock ->&p->pi_lock ->btrfs-treloc-01 ->btrfs-treloc-01/1 FD: 173 BD: 183 +.+.: btrfs-tree-00/1 ->mmu_notifier_invalidate_range_start ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->pool_lock#2 ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&xa->xa_lock#9 ->key#25 ->&fs_info->block_group_cache_lock ->&space_info->lock ->&rq->__lock ->&fs_info->ref_verify_lock ->&c->lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#38 ->&____s->seqcount#2 ->&____s->seqcount ->&fs_info->global_root_lock ->&fs_info->fs_roots_radix_lock ->&sem->wait_lock ->&p->pi_lock ->&wb->list_lock ->&wb->work_lock ->btrfs-extent-00 ->btrfs-dreloc-00 ->&n->list_lock ->lock#4 FD: 1211 BD: 5 +.+.: &ctx->uring_lock ->fs_reclaim ->pool_lock#2 ->key#29 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->stock_lock ->&acct->lock ->&wq->lock ->&sighand->siglock ->&vn->pool_lock ->&vn->busy.lock ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->&fs->lock ->&rq->__lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->&p->pi_lock ->&xa->xa_lock#24 ->&mm->mmap_lock ->&table->hbs[i].lock ->&ctx->completion_lock#2 ->percpu_ref_switch_lock ->&base->lock ->&tctx->wait ->rcu_node_0 ->&n->list_lock ->&pf->rwait 
->&ppp->rlock ->sk_lock-AF_INET6 ->slock-AF_INET6 ->(&timer.timer) ->&ei->socket.wq.wait ->sk_lock-AF_INET ->slock-AF_INET#2 ->&hashinfo->lock#2 ->&tty->ldisc_sem ->&rcu_state.expedited_wq ->quarantine_lock ->&lock->wait_lock ->&proc->inner_lock ->&thread->wait FD: 13 BD: 1 +.+.: &xa->xa_lock#22 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 45 BD: 219 .+.+: &cache->data_rwsem ->&ctl->tree_lock ->&space_info->lock FD: 2 BD: 18 +.+.: &root->ordered_extent_lock ->&fs_info->ordered_root_lock FD: 1 BD: 170 +.+.: &fs_info->ordered_root_lock FD: 11 BD: 76 +...: &sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 297 BD: 12 ++++: btrfs_ordered_extent ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->sb_internal#4 ->btrfs_trans_num_writers ->&obj_hash[i].lock ->btrfs_trans_pending_ordered ->&root->ordered_extent_lock ->&entry->wait ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->remove_cache_srcu ->&ei->lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 107 BD: 178 +.+.: btrfs-csum-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->&obj_hash[i].lock ->&fs_info->block_group_cache_lock ->&space_info->lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&sem->wait_lock ->&p->pi_lock ->key#25 ->&rq->__lock FD: 29 BD: 13 ....: &entry->wait ->&p->pi_lock FD: 1 BD: 5 ....: &xa->xa_lock#23 FD: 1192 BD: 4 +.+.: &tty->legacy_mutex/1 ->&tty->ldisc_sem ->&tty->files_lock ->tasklist_lock ->&f->f_lock ->&tty->read_wait ->&tty->write_wait ->&tty->ctrl.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 34 BD: 14 ++++: &o_tty->termios_rwsem/1 ->&tty->read_wait ->&vn->busy.lock ->&obj_hash[i].lock ->&vn->lazy.lock ->pool_lock#2 FD: 1 BD: 24 +.+.: &port->buf.lock/1 FD: 1 BD: 7 ....: &wq->list_lock FD: 1 BD: 3 +.+.: &ctx->napi_lock FD: 1 BD: 4 +.+.: &blkg->async_bio_lock FD: 38 BD: 1 +.+.: (wq_completion)blkcg_punt_bio ->(work_completion)(&blkg->async_bio_work) FD: 37 BD: 2 +.+.: (work_completion)(&blkg->async_bio_work) ->&blkg->async_bio_lock ->tk_core.seq.seqcount FD: 1 BD: 7 ....: &fs_info->delayed_iputs_wait FD: 2 BD: 166 +.+.: &fs_info->ordered_operations_mutex ->&fs_info->ordered_root_lock FD: 1 BD: 1 ....: driver_id_numbers.xa_lock FD: 46 BD: 1 -...: &dev->lock ->(console_sem).lock ->pool_lock#2 FD: 107 BD: 177 +.+.: btrfs-quota-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->&fs_info->ref_verify_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->key#25 FD: 444 BD: 1 +.+.: &ctx->ring_lock ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#18 ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mm->mmap_lock ->aio_nr_lock ->&mm->ioctx_lock ->&c->lock FD: 1 BD: 2 +.+.: aio_nr_lock FD: 1 BD: 2 +.+.: &mm->ioctx_lock FD: 1 BD: 1 ....: &ctx->completion_lock FD: 165 BD: 172 +.+.: &fs_info->qgroup_rescan_lock ->&fs_info->global_root_lock ->mmu_notifier_invalidate_range_start 
->pool_lock#2 ->&____s->seqcount ->btrfs-quota-00 ->btrfs-quota-00/1 ->&eb->refs_lock ->&obj_hash[i].lock ->&x->wait#44 FD: 1 BD: 3 ....: semaphore->lock#4 FD: 2 BD: 2 +.+.: &fs_info->dev_replace.lock_finishing_cancel_unmount ->&fs_info->dev_replace.rwsem FD: 4 BD: 4 +.+.: &fs_info->defrag_inodes_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 35 BD: 5 +.+.: &fc->lock ->&fc->bg_lock ->&fpq->lock ->&fiq->lock ->&fc->blocked_waitq FD: 1 BD: 245 ....: key#26 FD: 1 BD: 1 ....: &ctx->wait FD: 29 BD: 1 ..-.: &x->wait#35 ->&p->pi_lock FD: 1 BD: 245 ....: key#27 FD: 1 BD: 2 ....: &x->wait#36 FD: 14 BD: 69 +...: &dccp_hashinfo.bhash[i].lock ->&dccp_hashinfo.bhash2[i].lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: key#29 FD: 1 BD: 76 +.-.: &icsk->icsk_accept_queue.rskq_lock#2 FD: 37 BD: 2 +.+.: (work_completion)(&(&bat_priv->tt.work)->work) ->key#15 ->key#30 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.roam_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 112 BD: 3 +.+.: &type->s_umount_key#59 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&dentry->d_lock/1 ->&rq->__lock ->&mq_lock ->&info->lock#2 FD: 59 BD: 407 +.+.: &sb->s_type->i_lock_key#40 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 446 BD: 1 +.+.: &tfile->napi_mutex ->bh_lock ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&mm->mmap_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg2#35 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 2 +.+.: &fs_info->zone_active_bgs_lock FD: 29 BD: 1 +.+.: &bdi->cgwb_release_mutex ->&rq->__lock ->cgwb_lock FD: 1 BD: 1 ..-.: &ctx->ctx_lock FD: 1 BD: 174 ....: key#28 FD: 1 BD: 1 +.+.: &table->cache_lock FD: 1 BD: 5 +.+.: &q->instances_lock FD: 1 BD: 5 +...: &log->instances_lock FD: 1 BD: 74 ...-: &f->f_owner.lock FD: 1184 BD: 13 +.+.: &tty->ldisc_sem/1 ->&tty->termios_rwsem ->tty_ldiscs_lock ->&obj_hash[i].lock ->pool_lock#2 ->disc_data_lock ->&pch->chan_sem ->&pch->upl ->&rq->__lock ->&pn->all_channels_lock ->&pf->rwait ->&dir->lock ->disc_data_lock#2 ->&sl->lock ->&x->wait#2 ->(&sl->keepalive_timer) ->&base->lock ->(&sl->outfill_timer) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->lweventlist_lock ->stock_lock ->krc.lock ->&dir->lock#2 ->netdev_unregistering_wq.lock ->&buf->lock ->&port->buf.lock/1 ->&hu->proto_lock ->&sem->waiters ->&rsp->gp_wait ->&hdev->unregister_lock ->hci_dev_list_lock ->&x->wait#10 ->&hdev->cmd_sync_work_lock ->(pm_chain_head).rwsem ->&hdev->req_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&root->kernfs_rwsem ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->&k->k_lock ->&k->list_lock ->sysfs_symlink_target_lock ->subsys mutex#39 ->&x->wait#8 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->&rfkill->lock ->uevent_sock_mutex ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->triggers_list_lock ->leds_list_lock ->&____s->seqcount ->subsys mutex#74 ->&dev->devres_lock ->&n->list_lock ->gdp_mutex ->pin_fs_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key#3 ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->mount_lock ->&wq->mutex ->wq_pool_mutex ->&hdev->lock ->&xa->xa_lock#17 ->hci_index_ida.xa_lock ->pcpu_lock FD: 37 BD: 2 +.+.: (work_completion)(&tty->hangup_work) ->&obj_hash[i].lock 
->pool_lock#2 ->&tty->files_lock ->stock_lock FD: 2 BD: 8 +.+.: &acct->lock ->&worker->lock FD: 2 BD: 6 +.+.: &wq->lock FD: 1 BD: 10 +.+.: &worker->lock FD: 4 BD: 6 +.+.: &xa->xa_lock#24 ->pool_lock#2 ->&obj_hash[i].lock FD: 29 BD: 6 +.+.: &table->hbs[i].lock ->pool_lock#2 ->&p->pi_lock FD: 12 BD: 6 +.+.: &ctx->completion_lock#2 ->stock_lock ->pool_lock#2 ->&ctx->timeout_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 35 BD: 3 ..-.: &(&ctx->fallback_work)->timer FD: 29 BD: 4 ....: &x->wait#37 ->&p->pi_lock FD: 1 BD: 4 ....: &hash->wait FD: 1221 BD: 1 +.+.: (wq_completion)iou_exit ->(work_completion)(&ctx->exit_work) FD: 1220 BD: 2 +.+.: (work_completion)(&ctx->exit_work) ->&ctx->uring_lock ->&ctx->completion_lock#2 ->&(&ctx->fallback_work)->timer ->&obj_hash[i].lock ->&base->lock ->&x->wait#38 ->&rq->__lock ->(&timer.timer) ->pool_lock#2 ->stock_lock ->percpu_ref_switch_lock ->&ctx->napi_lock ->&zone->lock ->&xa->xa_lock#24 ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&sqd->lock ->&x->wait#48 ->quarantine_lock ->&lock->wait_lock ->&p->pi_lock FD: 1 BD: 7 ....: &ctx->timeout_lock FD: 29 BD: 6 ..-.: &x->wait#38 ->&p->pi_lock FD: 1212 BD: 2 +.+.: (work_completion)(&(&ctx->fallback_work)->work) ->&ctx->uring_lock FD: 1 BD: 6 ....: &tctx->wait FD: 1 BD: 19 +...: &pn->all_channels_lock FD: 529 BD: 2 +.+.: (work_completion)(&buf->work) ->&buf->lock ->&port->buf.lock/1 FD: 85 BD: 4 +.+.: sk_lock-AF_NFC ->&rq->__lock ->slock-AF_NFC ->&k->list_lock ->&k->k_lock ->llcp_devices_lock ->fs_reclaim ->pool_lock#2 ->&local->sdp_lock ->&local->sockets.lock ->&c->lock FD: 1 BD: 5 +...: slock-AF_NFC FD: 2 BD: 5 +.+.: &local->sdp_lock ->&local->sockets.lock FD: 1 BD: 6 ++++: &local->sockets.lock FD: 1 BD: 4 +...: clock-AF_NFC FD: 35 BD: 1 ..-.: &(&bat_priv->tt.work)->timer FD: 1 BD: 7 +...: key#30 FD: 1 BD: 7 +...: &bat_priv->tt.req_list_lock FD: 1 BD: 7 +...: &bat_priv->tt.roam_list_lock FD: 1 BD: 6 +.+.: &mq_lock FD: 107 BD: 2 +.+.: free_ipc_work ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->mount_lock ->&fsnotify_mark_srcu ->&dentry->d_lock ->&type->s_umount_key#59 ->sb_lock ->unnamed_dev_ida.xa_lock ->list_lrus_mutex ->&xa->xa_lock#5 ->pool_lock#2 ->mnt_id_ida.xa_lock ->&ids->rwsem ->&ht->mutex ->percpu_counters_lock ->pcpu_lock ->sysctl_lock ->proc_inum_ida.xa_lock ->stock_lock ->&sb->s_type->i_lock_key#24 ->rename_lock.seqcount ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&____s->seqcount ->pool_lock ->rcu_node_0 ->quarantine_lock FD: 1 BD: 14 ....: disc_data_lock FD: 440 BD: 18 ++++: &pch->chan_sem ->&pch->downl ->&rq->__lock ->&mm->mmap_lock FD: 1 BD: 73 +...: &pch->downl FD: 34 BD: 69 ++..: &pch->upl ->&ppp->wlock ->&obj_hash[i].lock ->pool_lock#2 ->&ppp->rlock FD: 2 BD: 72 +.+.: &match->lock ->ptype_lock FD: 1 BD: 7 +...: smc_v4_hashinfo.lock FD: 1094 BD: 6 +.+.: sk_lock-AF_SMC ->slock-AF_SMC ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->rtnl_mutex ->&pnettable->lock ->smc_ib_devices.mutex ->&smc_clc_eid_table.lock ->&obj_hash[i].lock ->&smc->clcsock_release_lock ->k-clock-AF_INET ->smc_v4_hashinfo.lock ->clock-AF_SMC ->&rq->__lock FD: 1 BD: 7 +...: slock-AF_SMC FD: 495 BD: 7 +.+.: &smc->clcsock_release_lock ->&mm->mmap_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->&net->smc.mutex_fback_rsn ->k-clock-AF_INET ->&obj_hash[i].lock ->pool_lock#2 ->&dir->lock ->stock_lock ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu FD: 1 BD: 7 .+.+: &smc_clc_eid_table.lock FD: 1 BD: 8 +.+.: 
&net->smc.mutex_fback_rsn FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#5 FD: 1 BD: 82 ..-.: &list->lock#25 FD: 29 BD: 67 +.+.: __ip_vs_mutex ->&ipvs->dest_trash_lock ->&rq->__lock FD: 1 BD: 68 +...: &ipvs->dest_trash_lock FD: 28 BD: 67 +.+.: flowtable_lock ->&rq->__lock FD: 41 BD: 7 +.+.: nf_conntrack_mutex ->&nf_conntrack_locks[i] ->&rq->__lock ->&cfs_rq->removed.lock ->nf_conntrack_mutex.wait_lock ->&____s->seqcount#7 ->&obj_hash[i].lock ->pool_lock#2 ->&nf_conntrack_locks[i]/1 ->quarantine_lock FD: 28 BD: 3 ++++: &ids->rwsem ->&rq->__lock FD: 24 BD: 72 +.-.: bh_lock#2 ->pool_lock#2 ->&c->lock FD: 1 BD: 7 +...: clock-AF_SMC FD: 1 BD: 70 ....: &asoc->wait FD: 1 BD: 1 ....: _rs.lock#2 FD: 9 BD: 410 +.-.: sctp_assocs_id_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 8 +.+.: nf_conntrack_mutex.wait_lock FD: 46 BD: 70 +...: _xmit_NETROM ->(console_sem).lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 +.+.: &net->ipv4.ra_mutex FD: 362 BD: 1 +.+.: &type->s_umount_key#60/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&____s->seqcount ->shrinker_mutex ->list_lrus_mutex ->&c->lock ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&x->wait#25 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&____s->seqcount#2 ->&xa->xa_lock#5 ->&s->s_inode_list_lock ->free_vmap_area_lock ->&vn->busy.lock ->rcu_node_0 ->&xa->xa_lock#9 ->lock#4 ->&n->list_lock ->&cache->lock#3 ->&stream->mutex ->&sb->s_type->i_lock_key#39 ->&dentry->d_lock ->init_mm.page_table_lock ->(console_sem).lock ->&fsnotify_mark_srcu ->lock#5 ->&lruvec->lru_lock ->&vn->lazy.lock ->&x->wait#23 FD: 1 BD: 2 +.+.: &cache->lock#3 FD: 1 BD: 2 +.+.: &stream->mutex FD: 59 BD: 500 +.+.: &sb->s_type->i_lock_key#39 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 120 BD: 1 +.+.: &type->s_umount_key#61 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#39 ->&dentry->d_lock/1 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu ->pool_lock#2 ->quarantine_lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&vn->busy.lock ->&vn->lazy.lock FD: 1 BD: 9 +.+.: &fi->lock FD: 33 BD: 11 +.+.: &fc->bg_lock ->&fiq->lock ->&fc->blocked_waitq FD: 31 BD: 14 +.+.: &fiq->lock ->&fiq->waitq ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 15 ....: &fiq->waitq ->&p->pi_lock FD: 1 BD: 6 +.+.: &fpq->lock FD: 29 BD: 10 +.+.: &req->waitq ->&p->pi_lock FD: 1 BD: 14 ....: &fc->blocked_waitq FD: 1 BD: 19 ....: key#31 FD: 397 BD: 1 +.+.: &type->s_umount_key#63/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&c->lock ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&rq->__lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->&____s->seqcount#2 ->&____s->seqcount ->crypto_alg_sem ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->rcu_node_0 ->(console_sem).lock ->bit_wait_table + i ->&obj_hash[i].lock ->percpu_counters_lock ->slab_mutex ->&xa->xa_lock#5 ->&sb->s_type->i_lock_key#41 ->&folio_wait_table[i] ->key#31 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&n->list_lock ->&array[i].curseg_mutex ->&array[i].journal_rwsem ->&free_i->segmap_lock ->&dirty_i->seglist_lock ->&sit_i->sentry_lock ->&nm_i->nid_list_lock ->&nm_i->build_lock ->f2fs_stat_lock ->&nm_i->nat_tree_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->proc_subdir_lock 
->proc_inum_ida.xa_lock ->&sbi->cp_global_sem ->batched_entropy_u8.lock ->kfence_freelist_lock ->&base->lock ->&fq->mq_flush_lock ->f2fs_list_lock ->&dentry->d_lock ->wq_pool_mutex ->&sbi->cp_lock ->&sb->map[i].swap_lock FD: 59 BD: 582 +.+.: &sb->s_type->i_lock_key#41 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 1 BD: 1 ....: &sbi->gc_thread->gc_wait_queue_head FD: 1 BD: 10 +.+.: &ei->cache_lru_lock FD: 22 BD: 71 +.-.: slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&sctp_port_hashtable[i].lock ->clock-AF_INET6 FD: 1 BD: 71 ....: rlock-AF_INET6 FD: 130 BD: 69 +.+.: sk_lock-AF_INET6/1 ->slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#20 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pool_lock#2 ->krc.lock ->rcu_node_0 ->sctp_assocs_id_lock ->&cfs_rq->removed.lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->&token_hash[i].lock ->&msk->pm.lock ->&c->lock ->&list->lock#25 ->fs_reclaim ->&zone->lock ->&n->list_lock FD: 1 BD: 76 ++.-: &sctp_ep_hashtable[i].lock FD: 1 BD: 72 ....: &list->lock#20 FD: 1 BD: 5 ..-.: rlock-AF_INET FD: 79 BD: 4 +.+.: sk_lock-AF_INET/1 ->slock-AF_INET#2 ->rlock-AF_INET ->&list->lock#20 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->krc.lock ->sctp_assocs_id_lock FD: 43 BD: 583 +.+.: &sb->s_type->i_lock_key#42 ->&dentry->d_lock FD: 361 BD: 1 +.+.: &type->s_umount_key#64/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->batched_entropy_u8.lock ->kfence_freelist_lock ->bdev_lock ->&disk->open_mutex ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&base->lock ->rcu_node_0 ->&rq->__lock ->lock#5 ->bit_wait_table + i ->nls_lock ->&s->s_inode_list_lock ->&ei->cache_lru_lock ->&rcu_state.expedited_wq ->&sb->map[i].swap_lock ->&sb->s_type->i_lock_key#42 ->&wb->list_lock ->&wb->work_lock ->&dentry->d_lock FD: 458 BD: 1 +.+.: (wq_completion)loop1 ->(work_completion)(&worker->work) ->(work_completion)(&lo->rootcg_work) ->&rq->__lock FD: 169 BD: 4 ++++: &sb->s_type->i_mutex_key#25 ->fs_reclaim ->&c->lock ->stock_lock ->pool_lock#2 ->&dentry->d_lock ->&sbi->s_lock ->&sb->s_type->i_lock_key#42 ->&rq->__lock ->namespace_sem FD: 172 BD: 2 +.+.: &sbi->umount_mutex ->&rq->__lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&x->wait ->&dcc->cmd_lock ->&x->wait#40 ->&sbi->cp_global_sem ->&im->ino_lock ->f2fs_list_lock FD: 1 BD: 187 +.+.: &pa->pa_lock#2 FD: 1 BD: 159 ...-: init_task.mems_allowed_seq.seqcount FD: 1 BD: 1549 -.-.: &rd->rto_lock FD: 17 BD: 1549 -.-.: &rq->__lock/1 ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&rt_rq->rt_runtime_lock ->&rt_b->rt_runtime_lock FD: 1 BD: 22 +.+.: &msft->filter_lock FD: 1 BD: 5 +...: ip6_fl_lock FD: 1 BD: 5 +...: recent_lock FD: 1 BD: 183 +.+.: &cache->c_list_lock FD: 56 BD: 5 +.+.: (work_completion)(&(&sbi->mdb_work)->work) ->&sbi->work_lock ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 80 +.-.: &msk->pm.lock FD: 225 BD: 1 +.+.: &type->s_umount_key#80/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->&c->lock ->sb_lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->percpu_counters_lock ->&x->wait#8 ->&n->list_lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem 
->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#36 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->&xa->xa_lock#5 ->stock_lock ->mmu_notifier_invalidate_range_start ->inode_hash_lock ->&m->req_lock ->&rq->__lock ->remove_cache_srcu ->&clnt->lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#48 ->&dentry->d_lock ->&x->wait#23 ->rename_lock.seqcount ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&dentry->d_lock/1 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock FD: 83 BD: 3 +.+.: &fi->mutex ->fs_reclaim ->&fiq->lock ->&req->waitq ->&rq->__lock ->&lock->wait_lock ->stock_lock ->pool_lock#2 ->&fc->blocked_waitq ->&obj_hash[i].lock FD: 2 BD: 3 +.+.: &id_priv->handler_mutex ->&id_priv->lock FD: 1 BD: 1 ....: &x->wait#45 FD: 5 BD: 1 +.+.: &file->mut ->ctx_table.xa_lock FD: 4 BD: 2 +.+.: ctx_table.xa_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 3 ....: &x->wait#46 FD: 40 BD: 1 +.+.: put_task_map-wait-type-override#2 ->css_set_lock ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock FD: 71 BD: 1 +.-.: (&n->timer) ->&n->lock ->&obj_hash[i].lock ->pool_lock#2 ->icmp_global.lock ->&c->lock ->&dir->lock#2 ->nl_table_lock ->nl_table_wait.lock ->&ul->lock#2 ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->batched_entropy_u8.lock FD: 142 BD: 11 +.+.: &sb->s_type->i_mutex_key#27/1 ->&rq->__lock ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#44 ->&sb->s_type->i_mutex_key#27/5 ->&sb->s_type->i_mutex_key#27 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&sb->s_type->i_mutex_key#27/4 ->&fsnotify_mark_srcu ->&xa->xa_lock#9 ->&obj_hash[i].lock FD: 165 BD: 9 .+.+: sb_writers#22 ->&rq->__lock ->mount_lock ->&sb->s_type->i_mutex_key#27/1 ->&sb->s_type->i_mutex_key#27 ->inode_hash_lock ->fs_reclaim ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 ->&dentry->d_lock ->tomoyo_ss ->pool_lock#2 ->&sb->s_type->i_lock_key#44 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&wb->list_lock ->(console_sem).lock ->&type->s_vfs_rename_key#5 ->&c->lock ->&obj_hash[i].lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu FD: 91 BD: 1 .+.+: &sb->s_type->i_mutex_key#31 ->fs_reclaim ->stock_lock ->&dentry->d_lock FD: 38 BD: 2 +.+.: (work_completion)(&(&devlink->rwork)->work) ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 67 +...: nr_neigh_list_lock FD: 1 BD: 67 +...: nr_list_lock FD: 45 BD: 2 +.+.: (fqdir_free_work).work ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 35 BD: 1 ..-.: net/ipv4/inet_fragment.c:178 FD: 4 BD: 67 +...: &bond->mode_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 ....: (&ifmgd->timer) FD: 28 BD: 1 +.+.: (work_completion)(&(&bond->alb_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 28 BD: 1 +.+.: (work_completion)(&(&bond->mii_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 35 BD: 1 ..-.: &(&bond->mii_work)->timer FD: 35 BD: 1 ..-.: &(&bond->alb_work)->timer FD: 1 BD: 67 +...: &sch->root_lock_key#338 FD: 187 BD: 70 +.+.: team->team_lock_key#17 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->&dev_addr_list_lock_key#8 ->&c->lock ->&n->list_lock ->&rq->__lock ->&dir->lock#2 ->input_pool.lock ->&____s->seqcount#2 ->&____s->seqcount ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock 
->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock FD: 443 BD: 2 ++++: &fc->killsb ->inode_hash_lock ->&type->i_mutex_dir_key#12/1 FD: 1 BD: 1 ....: &ff->poll_wait FD: 66 BD: 68 +.-.: (&peer->timer_retransmit_handshake) ->&peer->endpoint_lock FD: 134 BD: 1 .+.+: sb_writers#18 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#28 ->&wb->list_lock ->fuse_mutex ->&fc->lock ->&fiq->lock ->&req->waitq ->&rq->__lock ->&fc->blocked_waitq ->&obj_hash[i].lock ->pool_lock#2 FD: 451 BD: 1 +.+.: &type->s_umount_key#67 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->&fc->killsb ->&fc->lock ->fuse_mutex ->rename_lock.seqcount ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#40 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fiq->lock ->inode_hash_lock ->pool_lock#2 ->&dentry->d_lock/1 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->lock#4 ->lock#5 ->&lruvec->lru_lock FD: 1 BD: 67 +...: &sch->root_lock_key#340 FD: 1 BD: 1 +.+.: v9fs_sessionlist_lock FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#15 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#14 FD: 1 BD: 69 +...: &batadv_netdev_xmit_lock_key FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#13 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#12 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#11 FD: 1 BD: 67 +...: &bat_priv->forw_bcast_list_lock FD: 1 BD: 67 ....: (&hsr->announce_proxy_timer) FD: 1 BD: 67 ....: (&hsr->prune_proxy_timer) FD: 1 BD: 5 +...: k-clock-AF_NETLINK FD: 1 BD: 69 +.+.: acaddr_hash_lock FD: 96 BD: 6 .+.+: &jfs_ip->rdwrlock/1 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&jfs_ip->rdwrlock#2/2 ->&bmp->db_bmaplock ->&mp->wait ->&sb->s_type->i_lock_key#50 ->&wb->list_lock ->&c->lock ->&____s->seqcount#2 ->&folio_wait_table[i] FD: 2 BD: 1 +.+.: sk_lock-AF_ALG ->slock-AF_ALG FD: 89 BD: 2 +.+.: (work_completion)(&(&pool->release_dw)->work) ->&rq->__lock ->&r->consumer_lock#3 ->&obj_hash[i].lock ->mem_id_lock ->page_pools_lock ->pool_lock#2 ->pcpu_lock FD: 1091 BD: 10 +.+.: &devlink->lock_key#24 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&n->list_lock ->&xa->xa_lock#19 ->&____s->seqcount#2 ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock FD: 104 BD: 1 +.+.: &type->i_mutex_dir_key#18/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->lock#4 ->&mapping->i_private_lock ->&c->lock ->pool_lock#2 ->&sb->s_type->i_lock_key#51 ->rename_lock ->(console_sem).lock FD: 28 BD: 5 +.+.: ipvs->sync_mutex ->&rq->__lock FD: 28 BD: 5 +.+.: &fn->fou_lock ->&rq->__lock FD: 29 BD: 9 +.+.: (work_completion)(&rxnet->service_conn_reaper) ->&rxnet->conn_lock ->&rq->__lock FD: 1 BD: 3 +.+.: dev_map_lock FD: 80 BD: 2 +.+.: (work_completion)(&fqdir->destroy_work) ->&ht->mutex ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 FD: 9 BD: 399 ....: &clnt->lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 68 +.-.: bpf_lock FD: 1 BD: 2 +...: slock-AF_ALG FD: 46 BD: 14 +.+.: &t->lock ->(console_sem).lock FD: 35 BD: 5 +.+.: (wq_completion)l2tp ->(work_completion)(&tunnel->del_work) FD: 1 BD: 5 ....: loop_conns_lock FD: 1 BD: 5 ....: rds_tcp_conn_lock FD: 1 BD: 5 ....: 
(&sk->sk_timer) FD: 34 BD: 2 +.+.: (work_completion)(&(&bat_priv->bla.work)->work) ->key#18 ->&obj_hash[i].lock ->&base->lock ->crngs.lock ->rcu_node_0 ->&rq->__lock FD: 1 BD: 7 +...: &hash->list_locks[i] FD: 32 BD: 2 +.+.: (work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 35 BD: 1 ..-.: &(&bat_priv->bla.work)->timer FD: 35 BD: 1 ..-.: &(&bat_priv->dat.work)->timer FD: 1 BD: 5 +.+.: (wq_completion)krdsd FD: 1 BD: 67 +...: &sch->root_lock_key#783 FD: 29 BD: 5 +.+.: rdma_nets_rwsem ->rdma_nets.xa_lock ->&rq->__lock FD: 187 BD: 67 +.+.: team->team_lock_key#24 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->&dev_addr_list_lock_key#8 ->&dir->lock#2 ->input_pool.lock ->&c->lock ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->lweventlist_lock ->(console_sem).lock FD: 119 BD: 1 +.+.: &type->s_umount_key#87 ->&rq->__lock ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#51 ->&dentry->d_lock/1 ->pool_lock#2 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu FD: 1 BD: 5 ....: (&local->sta_cleanup) FD: 1 BD: 67 +...: &sch->root_lock_key#782 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#40 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 1 BD: 67 +...: &sch->root_lock_key#735 FD: 1 BD: 67 +...: &sch->root_lock_key#781 FD: 49 BD: 76 +...: &dev_addr_list_lock_key/2 ->&dev_addr_list_lock_key#7/1 ->&dev_addr_list_lock_key#8 ->&obj_hash[i].lock ->krc.lock FD: 1 BD: 67 +...: &sch->root_lock_key#793 FD: 194 BD: 3 ++++: minor_rwsem#2 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->&____s->seqcount#2 ->&____s->seqcount ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#79 ->&hiddev->existancelock FD: 1 BD: 966 ....: &memcg->deferred_split_queue.split_queue_lock FD: 1 BD: 68 +.+.: &block->proto_destroy_lock FD: 55 BD: 3 +.+.: &nsock->tx_lock ->&u->lock ->rcu_node_0 ->&rq->__lock ->&lock->wait_lock ->&rcu_state.gp_wq FD: 138 BD: 1 ++++: &type->s_umount_key#68 ->&sb->s_type->i_lock_key#42 ->&mapping->i_private_lock ->mmu_notifier_invalidate_range_start ->&sbi->inode_hash_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&sbi->s_lock ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->pool_lock#2 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&s->s_inode_list_lock ->&ei->cache_lru_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&wb->work_lock ->tk_core.seq.seqcount ->&base->lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 1 ....: &list->lock#23 FD: 1 BD: 1 ....: (&local->client_conn_reap_timer) FD: 1 BD: 8 ..-.: rlock-AF_RXRPC FD: 1 BD: 5 +...: k-clock-AF_RXRPC FD: 1 BD: 3 +.+.: &(&net->fs_lock)->lock FD: 1 BD: 6 +.+.: 
&call->notify_lock FD: 1 BD: 6 +.+.: &rx->recvmsg_lock FD: 305 BD: 8 +.+.: &sb->s_type->i_mutex_key#23/4 ->&ei->i_mmap_lock ->&ei->i_mmap_lock/1 FD: 1 BD: 749 ....: &wq#3 FD: 1 BD: 5 ....: (&net->fs_probe_timer) FD: 1088 BD: 6 +.+.: (work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 5 +.+.: (wq_completion)kafsd FD: 124 BD: 178 +.+.: btrfs-chunk-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&c->lock ->&fs_info->ref_verify_lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock ->key#25 ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 7 ++++: &net->cells_lock FD: 194 BD: 5 +.+.: &root->delalloc_mutex ->&root->delalloc_lock ->&sb->s_type->i_lock_key#38 ->&tree->lock ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&tree->lock#2 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&ei->lock ->&ei->ordered_tree_lock ->&root->ordered_extent_lock ->&xa->xa_lock#9 ->&rq->__lock ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->rcu_node_0 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->key#27 ->&wb->list_lock FD: 195 BD: 4 +.+.: &fs_info->delalloc_root_mutex ->&fs_info->delalloc_root_lock ->&root->delalloc_mutex FD: 29 BD: 245 ....: &ticket.wait ->&p->pi_lock FD: 6 BD: 214 +.+.: &fs_info->ref_verify_lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 1 BD: 67 ....: (&brmctx->ip6_other_query.delay_timer) FD: 1 BD: 67 ....: (&brmctx->ip6_other_query.timer) FD: 1 BD: 67 ....: (&brmctx->ip6_mc_router_timer) FD: 1 BD: 67 ....: (&brmctx->ip4_other_query.delay_timer) FD: 1 BD: 67 ....: (&brmctx->ip4_other_query.timer) FD: 1 BD: 67 ....: (&brmctx->ip4_mc_router_timer) FD: 1 BD: 67 ....: (&br->tcn_timer) FD: 1 BD: 67 ....: (&br->topology_change_timer) FD: 1 BD: 67 ....: (&br->hello_timer) FD: 1 BD: 67 +...: &bond->ipsec_lock FD: 1 BD: 5 ....: (&net->fs_timer) FD: 1 BD: 6 +.+.: &rx->incoming_lock FD: 1 BD: 2 +.+.: vlan_ioctl_mutex.wait_lock FD: 1 BD: 6 ....: (rxrpc_call_limiter).lock FD: 1 BD: 6 ....: &list->lock#22 FD: 1 BD: 72 ....: (&local->dynamic_ps_timer) FD: 36 BD: 72 +.-.: (&ifibss->timer) ->&rdev->wiphy_work_lock FD: 37 BD: 68 +.-.: (&peer->timer_send_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 31 BD: 2 +.+.: (work_completion)(&net->cells_manager) ->&net->cells_lock ->bit_wait_table + i FD: 303 BD: 10 +.+.: &ei->i_mmap_lock/1 ->&sb->s_type->i_lock_key#38 ->&tree->lock ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&tree->lock#2 ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&ei->lock ->&ei->ordered_tree_lock ->&root->ordered_extent_lock ->&xa->xa_lock#9 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&rq->__lock ->sb_internal#4 ->btrfs_trans_num_writers ->fs_reclaim ->&space_info->lock ->&rsv->lock ->rcu_node_0 ->btrfs_ordered_extent ->&entry->wait ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock FD: 121 BD: 2 .+.+: 
&type->i_mutex_dir_key#12 ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&fiq->lock ->&req->waitq ->&rq->__lock ->rename_lock.seqcount ->stock_lock ->&dentry->d_lock ->inode_hash_lock ->remove_cache_srcu ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#40 ->&fi->lock ->&obj_hash[i].lock ->&fi->rdc.lock ->&fi->mutex ->&c->lock ->&lock->wait_lock ->&p->pi_lock FD: 36 BD: 1 +.+.: (wq_completion)afs ->(work_completion)(&net->cells_manager) ->(work_completion)(&net->fs_manager) FD: 1 BD: 5 ....: (&rxnet->service_conn_reap_timer) FD: 88 BD: 1 .+.+: sb_writers#23 ->mount_lock ->&type->i_mutex_dir_key#16 FD: 36 BD: 72 +.-.: (&dwork->timer)#3 ->&rdev->wiphy_work_lock FD: 1 BD: 5 ....: (&net->cells_timer) FD: 1 BD: 67 +...: &pmc->lock FD: 1 BD: 173 ....: &x->wait#44 FD: 101 BD: 176 +.+.: btrfs-log-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&fs_info->block_group_cache_lock ->&space_info->lock ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&xa->xa_lock#9 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->key#25 ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 67 ....: &rdev->dev_wait FD: 10 BD: 4 +.+.: &chan->lock/1 ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->clock-AF_BLUETOOTH ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&____s->seqcount FD: 306 BD: 2 +.+.: (work_completion)(&fs_info->async_data_reclaim_work) ->&space_info->lock ->key#26 ->key#27 ->&fs_info->delalloc_root_mutex ->&fs_info->ordered_operations_mutex ->&fs_info->delayed_iput_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&fs_info->trans_lock ->btrfs_trans_num_writers ->btrfs_trans_completed ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait ->sb_internal#4 FD: 169 BD: 167 +.+.: &root->log_mutex ->mmu_notifier_invalidate_range_start ->&c->lock ->&n->list_lock ->pool_lock#2 ->&rsv->lock ->&space_info->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&mapping->i_private_lock ->&rq->__lock ->lock ->&eb->refs_lock ->btrfs-log-00 ->lock#4 ->&root->log_writer_wait ->&tree->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#38 ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->tk_core.seq.seqcount ->lock#5 ->&folio_wait_table[i] ->&wb->list_lock ->btrfs-log-00/1 ->rcu_node_0 ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sb->map[i].swap_lock ->key#36 ->remove_cache_srcu ->&rcu_state.expedited_wq ->btrfs-log-01#2 ->&base->lock FD: 136 BD: 175 ++++: btrfs-log-00 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&xa->xa_lock#9 ->key#25 ->&eb->refs_lock ->&rsv->lock ->&space_info->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&c->lock ->&____s->seqcount ->stock_lock ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-log-00/1 ->&____s->seqcount#2 ->&rq->__lock ->btrfs-log-01/7 FD: 197 BD: 167 +.+.: &ei->log_mutex ->btrfs-log-00 ->&eb->refs_lock ->btrfs-tree-01 ->btrfs-tree-00 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&tree->lock#2 ->&ei->lock ->btrfs-log-00/1 ->&c->lock ->&ei->ordered_tree_lock ->&rq->__lock ->btrfs-log-01/7 ->btrfs-log-00/6 ->btrfs-log-01#2 ->btrfs-log-01#2/1 FD: 1 BD: 168 ....: &root->log_writer_wait FD: 158 BD: 177 ++++: btrfs-chunk-00 ->&rsv->lock ->&____s->seqcount#12 
->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-chunk-00/1 ->key#25 ->&rq->__lock ->stock_lock FD: 306 BD: 2 +.+.: (work_completion)(&fs_info->async_reclaim_work) ->&space_info->lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&fs_info->trans_lock ->btrfs_trans_num_writers ->&obj_hash[i].lock ->key#26 ->key#27 ->&fs_info->delalloc_root_mutex ->&base->lock ->&rq->__lock ->(&timer.timer) ->sb_internal#4 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&fs_info->delayed_iput_lock ->btrfs_trans_completed ->tk_core.seq.seqcount ->&fs_info->scrub_pause_wait FD: 1 BD: 6 ....: (&call->timer) FD: 33 BD: 2 +.+.: (work_completion)(&net->fs_manager) ->&(&net->fs_lock)->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->bit_wait_table + i FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#10 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#9 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#8 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#7 FD: 1 BD: 69 +...: &qdisc_xmit_lock_key#6 FD: 1 BD: 1 ....: _rs.lock#5 FD: 1 BD: 1 ....: _rs.lock#4 FD: 1 BD: 151 +.+.: binder_alloc_mmap_lock FD: 47 BD: 6 +.+.: &alloc->lock ->&lru->node[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->(console_sem).lock FD: 1 BD: 9 +.+.: binder_dead_nodes_lock FD: 1 BD: 5 +.+.: binder_deferred_lock.wait_lock FD: 101 BD: 2 +.+.: binder_deferred_work ->binder_deferred_lock ->&proc->inner_lock ->binder_procs_lock ->&device->context.context_mgr_node_lock ->&obj_hash[i].lock ->pool_lock#2 ->&node->lock ->&proc->outer_lock ->&alloc->lock ->quarantine_lock ->(console_sem).lock ->&rq->__lock ->&t->lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&x->wait#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 36 BD: 4 +.+.: binder_deferred_lock ->&rq->__lock ->binder_deferred_lock.wait_lock FD: 51 BD: 7 +.+.: &proc->outer_lock ->&node->lock FD: 44 BD: 69 +.-.: (&mp->timer) ->&br->multicast_lock FD: 50 BD: 8 +.+.: &node->lock ->&proc->inner_lock ->binder_dead_nodes_lock FD: 95 BD: 3 +.+.: &device->context.context_mgr_node_lock ->fs_reclaim ->pool_lock#2 ->&proc->inner_lock ->&node->lock ->&proc->outer_lock ->&c->lock ->&rq->__lock FD: 48 BD: 13 +.+.: &proc->inner_lock ->&thread->wait ->&t->lock ->(console_sem).lock ->tk_core.seq.seqcount FD: 54 BD: 5 +.+.: binder_procs_lock ->&proc->inner_lock ->&node->lock ->&proc->outer_lock ->&alloc->lock FD: 1 BD: 3 ....: (&p->timer) FD: 1 BD: 3 ....: (&p->rexmit_timer) FD: 48 BD: 2 +.+.: (work_completion)(&br->mcast_gc_work) ->&br->multicast_lock ->(&p->rexmit_timer) ->&obj_hash[i].lock ->&base->lock ->(&p->timer) ->pool_lock#2 ->krc.lock ->(&mp->timer) FD: 1 BD: 67 ....: (&pmctx->ip4_mc_router_timer) FD: 1 BD: 67 ....: (&pmctx->ip6_mc_router_timer) FD: 671 BD: 2 +.+.: (work_completion)(&msk->work) ->sk_lock-AF_INET ->slock-AF_INET#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock ->sk_lock-AF_INET6 ->slock-AF_INET6 FD: 65 BD: 71 +.-.: k-slock-AF_INET6/1 ->tk_core.seq.seqcount ->pool_lock#2 ->&obj_hash[i].lock ->slock-AF_INET6 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->&sctp_ep_hashtable[i].lock ->k-clock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->&dir->lock ->krc.lock FD: 1 BD: 1 ....: net_ratelimit_state.lock FD: 1 BD: 83 +.-.: &token_hash[i].lock FD: 131 BD: 10 +.+.: k-sk_lock-AF_INET/1 ->k-slock-AF_INET#2 ->pool_lock#2 ->&dir->lock ->fs_reclaim ->slock-AF_INET#2 
->&obj_hash[i].lock ->k-clock-AF_INET ->&hashinfo->ehash_locks[i] ->&rq->__lock ->tk_core.seq.seqcount ->&base->lock ->rcu_node_0 ->&tcp_hashinfo.bhash[i].lock ->krc.lock FD: 43 BD: 500 +.+.: &sb->s_type->i_lock_key#47 ->&dentry->d_lock FD: 129 BD: 70 +.+.: k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->fs_reclaim ->&h->lhash2[i].lock ->&icsk->icsk_accept_queue.rskq_lock#2 ->&obj_hash[i].lock ->krc.lock ->&rq->__lock ->k-clock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#20 ->tk_core.seq.seqcount ->&base->lock ->&c->lock FD: 1 BD: 5 +.+.: netns_bpf_mutex FD: 29 BD: 1 ....: &x->wait#43 ->&p->pi_lock FD: 29 BD: 1 +.+.: &vtsk->exit_mutex ->&p->pi_lock FD: 1 BD: 1 ....: &dev->wait FD: 1 BD: 1 +.+.: &dev->iotlb_lock FD: 4 BD: 8 +...: vsock_table_lock ->batched_entropy_u32.lock FD: 1 BD: 67 +...: &sch->root_lock_key#773 FD: 1 BD: 1 +.+.: vhost_vsock_mutex FD: 1 BD: 4 +...: &list->lock#21 FD: 1 BD: 4 ....: &ctx->wqh FD: 1 BD: 1 ....: eventfd_ida.xa_lock FD: 1 BD: 67 +.+.: &bond->stats_lock/2 FD: 276 BD: 1 +.+.: &fc->uapi_mutex ->&type->s_umount_key#32 ->&obj_hash[i].lock ->pool_lock#2 ->sb_lock ->fs_reclaim ->&type->s_umount_key/1 ->stock_lock ->mnt_id_ida.xa_lock ->&rq->__lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->ucounts_lock ->&newf->file_lock FD: 28 BD: 1 +.+.: &region->snapshot_lock ->&rq->__lock FD: 29 BD: 2 ....: &x->wait#42 ->&p->pi_lock FD: 1 BD: 80 +.-.: bh_lock#3 FD: 175 BD: 13 +.+.: &type->i_mutex_dir_key#3/2 ->mmu_notifier_invalidate_range_start ->&ei->xattr_sem ->&ei->i_raw_lock ->&ei->i_es_lock ->tk_core.seq.seqcount ->pool_lock#2 ->rcu_node_0 ->rename_lock FD: 317 BD: 11 +.+.: cpool_mutex ->fs_reclaim ->pool_lock#2 ->cpu_hotplug_lock ->&obj_hash[i].lock ->crypto_alg_sem FD: 441 BD: 3 +.+.: &vq->mutex ->&mm->mmap_lock ->&ctx->wqh ->&p->pi_lock ->rcu_node_0 ->&rq->__lock ->&lock->wait_lock ->&list->lock#21 ->&sem->wait_lock ->&rcu_state.expedited_wq FD: 442 BD: 2 +.+.: &worker->mutex ->&vq->mutex ->&p->pi_lock ->&rq->__lock FD: 1 BD: 2 +.+.: &xa->xa_lock#25 FD: 471 BD: 1 +.+.: &dev->mutex#4 ->&rq->__lock ->&p->alloc_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->stock_lock ->&sighand->siglock ->&vn->pool_lock ->&vn->busy.lock ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->&fs->lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->&p->pi_lock ->&xa->xa_lock#25 ->&worker->mutex ->&x->wait#42 ->&mm->mmap_lock ->&vq->mutex ->&lock->wait_lock ->rcu_node_0 ->&n->list_lock FD: 1 BD: 1 ....: _rs.lock#3 FD: 1 BD: 8 +...: &vvs->tx_lock FD: 84 BD: 2 +.+.: &mdev->open_mutex ->clients_lock ->&client->ports_lock ->fs_reclaim ->pool_lock#2 ->&grp->list_mutex#2 ->&grp->list_mutex/1 ->&c->lock ->&obj_hash[i].lock FD: 1 BD: 67 +...: &net->xfrm.xfrm_policy_lock FD: 1 BD: 5 +...: &net->nsid_lock FD: 1199 BD: 2 +.+.: net_cleanup_work ->pernet_ops_rwsem ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 ....: &x->wait#41 FD: 1 BD: 5 ....: (&sbi->s_err_report) FD: 1200 BD: 1 +.+.: (wq_completion)netns ->net_cleanup_work FD: 1 BD: 70 +.+.: rcu_state.barrier_mutex.wait_lock FD: 131 BD: 2 ++++: &type->s_umount_key#69 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->&sbinfo->stat_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&simple_offset_lock_class ->stock_lock ->percpu_counters_lock
->pcpu_lock ->&base->lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock ->tk_core.seq.seqcount ->&rq->__lock ->key#9 ->&sbinfo->shrinklist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&wb->list_lock ->inode_hash_lock FD: 1 BD: 30 +.+.: hci_cb_list_lock.wait_lock FD: 59 BD: 1 +.-.: (&p->forward_delay_timer) ->&br->lock FD: 1 BD: 1 ....: &fcc->flush_wait_queue FD: 1 BD: 1 ....: &dcc->discard_wait_queue FD: 66 BD: 20 +.+.: &array[i].curseg_mutex ->&array[i].journal_rwsem ->&sit_i->sentry_lock ->&io->io_lock ->&rq->__lock FD: 40 BD: 27 ++++: &array[i].journal_rwsem ->&nm_i->nat_list_lock ->&nm_i->nid_list_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 FD: 1 BD: 22 +.+.: &free_i->segmap_lock FD: 4 BD: 22 +.+.: &dirty_i->seglist_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 64 BD: 21 ++++: &sit_i->sentry_lock ->tk_core.seq.seqcount ->&dcc->cmd_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&array[i].journal_rwsem ->&obj_hash[i].lock ->&rq->__lock ->&dirty_i->seglist_lock ->&free_i->segmap_lock ->stock_lock ->&xa->xa_lock#9 ->lock#4 FD: 13 BD: 403 +.+.: &nm_i->nid_list_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 139 BD: 5 +.+.: &nm_i->build_lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&nm_i->nat_tree_lock ->&s->s_inode_list_lock ->&sb->map[i].swap_lock ->rcu_node_0 ->&____s->seqcount#2 FD: 129 BD: 23 ++++: &nm_i->nat_tree_lock ->&folio_wait_table[i] ->&rq->__lock ->lock#4 ->&nm_i->nid_list_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->lock ->&array[i].journal_rwsem ->&nm_i->nat_list_lock ->&obj_hash[i].lock ->stock_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->&wb->work_lock ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3 ....: f2fs_stat_lock FD: 1 BD: 28 +.+.: &nm_i->nat_list_lock FD: 169 BD: 9 +.+.: &sbi->cp_global_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&rq->__lock ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&sb->s_type->i_lock_key#41 ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&sbi->cp_rwsem ->&sbi->inode_lock[i] ->&wb->list_lock ->&sem->wait_lock ->&p->pi_lock ->&et->lock ->rcu_node_0 ->&nm_i->nat_tree_lock ->&sm_info->curseg_lock ->&io->io_rwsem FD: 1 BD: 4 +.+.: f2fs_list_lock FD: 136 BD: 16 .+.+: &fi->i_xattr_sem ->&fi->i_sem/1 ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->lock#4 ->&obj_hash[i].lock ->&rq->__lock FD: 215 BD: 2 .+.+: sb_writers#16 ->mount_lock ->&type->i_mutex_dir_key#11 ->&sb->s_type->i_mutex_key#24 ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->&xa->xa_lock#9 ->lock#4 ->lock#5 ->&sbi->inode_lock[i] ->&et->lock ->&fi->i_sem#2 ->&sbi->gc_lock ->tk_core.seq.seqcount ->fs_reclaim ->&obj_hash[i].lock ->&c->lock ->remove_cache_srcu ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&type->i_mutex_dir_key#11/1 ->&io->io_rwsem ->&fi->i_size_lock ->&f->f_lock ->&dentry->d_lock ->tomoyo_ss ->&fi->i_xattr_sem FD: 179 BD: 3 +.+.: &type->i_mutex_dir_key#11 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->lock#4 
->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&s->s_inode_list_lock ->&nm_i->nid_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->inode_hash_lock ->&sbi->sb_lock ->&eti->extent_tree_lock ->&sbi->cp_rwsem ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#41 ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->&cprc->ckpt_wait_queue ->&x->wait#50 ->&____s->seqcount ->&xa->xa_lock#9 FD: 62 BD: 6 ++++: &sbi->sb_lock ->&sbi->error_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&fq->mq_flush_lock ->&rq->__lock ->bit_wait_table + i ->&obj_hash[i].lock ->&base->lock FD: 41 BD: 5 +.+.: &eti->extent_tree_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&eti->extent_lock ->&et->lock FD: 168 BD: 14 ++++: &sbi->cp_rwsem ->&fi->i_xattr_sem ->&sbi->node_change ->&sbi->node_write ->&sem->wait_lock ->&rq->__lock ->lock#4 ->&fi->i_sem#2 ->&sbi->inode_lock[i] ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->&nm_i->nat_tree_lock ->&xa->xa_lock#9 ->&sm_info->curseg_lock ->&io->io_rwsem ->&et->lock ->&sbi->stat_lock ->key#32 ->&nm_i->nid_list_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&sit_i->sentry_lock ->&rcu_state.gp_wq ->lock#5 ->&lruvec->lru_lock ->&wb->work_lock FD: 135 BD: 17 +.+.: &fi->i_sem/1 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&sbi->stat_lock ->&nm_i->nat_tree_lock ->pool_lock#2 ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->&wb->work_lock ->&c->lock ->&____s->seqcount#2 ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&sbi->inode_lock[i] ->&et->lock ->&n->list_lock ->&rq->__lock FD: 1 BD: 22 +.+.: &sbi->stat_lock FD: 60 BD: 21 +.+.: &sbi->inode_lock[i] ->&sb->s_type->i_lock_key#41 FD: 13 BD: 22 ++++: &et->lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&eti->extent_lock ->&obj_hash[i].lock ->quarantine_lock FD: 193 BD: 3 +.+.: &sb->s_type->i_mutex_key#24 ->&fi->i_xattr_sem ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&sbi->node_change ->&sb->s_type->i_lock_key#41 ->&sbi->inode_lock[i] ->&rq->__lock ->&wb->list_lock ->fs_reclaim ->&sbi->cp_rwsem ->&fi->i_sem#2 ->&fi->i_gc_rwsem[WRITE] ->&fi->i_size_lock ->&sbi->gc_lock ->&sbi->pin_sem FD: 163 BD: 15 ++++: &sbi->node_change ->&sbi->inode_lock[i] ->&sbi->node_write ->lock#4 ->lock#5 ->&nm_i->nat_tree_lock ->&io->io_rwsem ->&sb->s_type->i_lock_key#41 ->rcu_node_0 ->&rq->__lock ->&et->lock ->&nm_i->nid_list_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&sbi->stat_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 138 BD: 15 ++++: &fi->i_sem#2 ->key#32 ->&nm_i->nat_tree_lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&sbi->stat_lock ->pool_lock#2 ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->&wb->work_lock ->&fi->i_xattr_sem ->&c->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&sbi->inode_lock[i] ->&et->lock FD: 1 BD: 20 ....: key#32 FD: 170 BD: 6 +.+.: &sbi->gc_lock ->&sbi->cp_global_sem ->&sit_i->sentry_lock ->lock#4 ->&nm_i->nat_tree_lock FD: 159 BD: 18 ++++: &sbi->node_write ->&xa->xa_lock#9 ->&sm_info->curseg_lock ->&io->io_rwsem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&nm_i->nat_tree_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock 
->&sbi->fsync_node_lock ->&nm_i->nid_list_lock ->key#31 ->key#32 ->key#33 ->&sit_i->sentry_lock ->&dirty_i->seglist_lock ->&array[i].curseg_mutex ->tk_core.seq.seqcount ->&sbi->cp_lock ->lock#4 ->&sb->s_type->i_lock_key#41 ->&wb->list_lock ->stock_lock ->lock#5 ->&sbi->cp_wait ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&im->ino_lock ->&sbi->stat_lock ->&dcc->cmd_lock ->&lock->wait_lock ->&p->pi_lock ->rcu_node_0 ->&wb->work_lock FD: 67 BD: 19 .+.+: &sm_info->curseg_lock ->&array[i].curseg_mutex ->&sit_i->sentry_lock FD: 47 BD: 23 +.+.: &dcc->cmd_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&rq->__lock ->&lock->wait_lock ->&dc->lock ->tk_core.seq.seqcount ->rcu_node_0 ->&obj_hash[i].lock ->&x->wait#40 FD: 1 BD: 22 +.+.: &io->io_lock FD: 78 BD: 19 ++++: &io->io_rwsem ->&io->io_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&c->lock ->&fq->mq_flush_lock ->&sb->map[i].swap_lock ->&____s->seqcount#2 ->&____s->seqcount ->&rcu_state.gp_wq ->&obj_hash[i].lock ->&base->lock ->(console_sem).lock ->&xa->xa_lock#9 ->&sbi->cp_lock ->&sbi->error_lock FD: 1 BD: 19 ..-.: &sbi->fsync_node_lock FD: 1 BD: 19 ....: key#33 FD: 1 BD: 20 ....: &sbi->cp_lock FD: 29 BD: 248 ..-.: &sbi->cp_wait ->&p->pi_lock FD: 4 BD: 399 +.+.: &im->ino_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 48 BD: 1 .+.+: sb_internal#5 ->&dcc->cmd_lock FD: 373 BD: 1 +.+.: &type->s_umount_key#65 ->&rq->__lock ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->&p->pi_lock ->&x->wait ->&sbi->gc_thread->fggc_wq ->&meta->lock ->kfence_freelist_lock ->&sbi->cp_global_sem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#41 ->&dentry->d_lock/1 ->pool_lock#2 ->rcu_node_0 ->&bdi->wb_waitq ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&sbi->gc_lock ->&s->s_inode_list_lock ->&sbi->inode_lock[i] ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu ->&eti->extent_tree_lock ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&root->kernfs_rwsem ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&k->list_lock ->&x->wait#39 ->&c->lock ->&sbi->umount_mutex ->&io->io_rwsem ->&wb->list_lock ->f2fs_stat_lock ->&nm_i->nid_list_lock ->&nm_i->nat_tree_lock ->&dirty_i->seglist_lock ->&____s->seqcount ->cpu_hotplug_lock ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->lock ->&wq->mutex ->wq_pool_mutex ->&rcu_state.expedited_wq ->&sem->wait_lock ->key ->&nm_i->build_lock ->key#32 FD: 1 BD: 2 ....: &sbi->gc_thread->fggc_wq FD: 130 BD: 8 +.+.: &sbi->s_lock ->&mapping->i_private_lock ->mmu_notifier_invalidate_range_start ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&sbi->inode_hash_lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->stock_lock ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&ei->cache_lru_lock ->&xa->xa_lock#9 ->lock#4 ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&sb->map[i].swap_lock ->bit_wait_table + i ->inode_hash_lock ->&sb->s_type->i_lock_key#42 ->&wb->list_lock FD: 44 BD: 9 +.+.: &sbi->inode_hash_lock ->&sb->s_type->i_lock_key#42 FD: 1 BD: 2 ....: &x->wait#39 FD: 447 BD: 6 +.+.: &sb->s_type->i_mutex_key#26 ->&fi->lock ->fs_reclaim ->pool_lock#2 ->&fiq->lock ->&req->waitq ->&rq->__lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 
->rcu_node_0 ->&rcu_state.expedited_wq ->&mm->mmap_lock ->&io->lock ->&fc->bg_lock ->&x->wait#49 ->&sem->wait_lock FD: 30 BD: 24 ..-.: &dc->lock ->&x->wait#40 FD: 29 BD: 25 ..-.: &x->wait#40 ->&p->pi_lock FD: 29 BD: 18 -.-.: &rq_wait->wait ->&p->pi_lock FD: 1 BD: 23 +.+.: &eti->extent_lock FD: 136 BD: 1 .+.+: sb_writers#17 ->mount_lock ->&sb->s_type->i_mutex_key#25/1 ->&sb->s_type->i_lock_key#42 ->&s->s_inode_list_lock ->&ei->cache_lru_lock ->tk_core.seq.seqcount ->&wb->list_lock ->&sbi->fat_lock ->&xa->xa_lock#9 ->&sbi->inode_hash_lock ->inode_hash_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu FD: 123 BD: 2 +.+.: &sb->s_type->i_mutex_key#25/1 ->rename_lock.seqcount ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#25 ->&sb->s_type->i_lock_key#42 ->&fsnotify_mark_srcu FD: 55 BD: 2 +.+.: &sbi->fat_lock ->&mapping->i_private_lock ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#9 FD: 113 BD: 1 +.+.: &type->s_umount_key#66 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#33 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&rq->__lock ->&fsnotify_mark_srcu ->binderfs_minors_mutex ->rcu_node_0 FD: 1 BD: 5 ....: (&net->ipv6.ip6_fib_timer) FD: 19 BD: 2 +.+.: &q->timer_mutex ->&tmr->lock ->register_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 2 BD: 5 ....: slave_active_lock ->&timer->lock FD: 92 BD: 2 +.+.: (work_completion)(&vsock->pkt_work) ->&list->lock#24 ->vsock_table_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&obj_hash[i].lock ->pool_lock#2 ->&dir->lock ->stock_lock FD: 86 BD: 7 +.+.: sk_lock-AF_VSOCK/1 ->slock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->tx_lock ->vsock_table_lock ->&vvs->rx_lock ->&list->lock#24 ->&c->lock ->&____s->seqcount#2 ->&rq->__lock ->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->clock-AF_VSOCK FD: 1 BD: 8 +...: &list->lock#24 FD: 93 BD: 1 +.+.: (wq_completion)vsock-loopback ->(work_completion)(&vsock->pkt_work) FD: 1 BD: 9 ....: p9_poll_lock FD: 1 BD: 4 +...: clock-AF_XDP FD: 1 BD: 67 +...: _xmit_NETROM#2 FD: 442 BD: 67 +.+.: &xs->mutex ->fs_reclaim ->&rq->__lock ->pool_lock#2 ->umem_ida.xa_lock ->&mm->mmap_lock ->free_vmap_area_lock ->&vn->busy.lock ->&c->lock ->&____s->seqcount ->init_mm.page_table_lock ->&pool->xsk_tx_list_lock FD: 1 BD: 4 +...: &xs->map_list_lock FD: 1 BD: 5 +...: clock-AF_PPPOX FD: 30 BD: 8 +...: slock-AF_VSOCK ->&sk->sk_lock.wq FD: 91 BD: 6 +.+.: sk_lock-AF_VSOCK ->slock-AF_VSOCK ->vsock_table_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&vvs->rx_lock ->&list->lock#24 ->&rq->__lock ->&dir->lock ->&obj_hash[i].lock ->sk_lock-AF_VSOCK/1 ->&ei->socket.wq.wait ->&vvs->tx_lock ->rcu_node_0 ->&____s->seqcount ->&base->lock ->clock-AF_VSOCK FD: 1 BD: 8 +...: &vvs->rx_lock FD: 83 BD: 4 +.+.: (work_completion)(&(&l->destroy_dwork)->work) ->&cgrp->pidlist_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 35 BD: 1 ..-.: &(&l->destroy_dwork)->timer FD: 1 BD: 186 ....: key#34 FD: 1 BD: 8 +...: clock-AF_VSOCK FD: 459 BD: 4 +.+.: sk_lock-AF_PPPOX ->slock-AF_PPPOX ->&pn->hash_lock ->fs_reclaim ->pool_lock#2 ->&dir->lock ->&pn->all_channels_lock ->&pch->chan_sem ->&pch->upl ->&pf->rwait ->&obj_hash[i].lock ->clock-AF_PPPOX ->&c->lock ->&mm->mmap_lock ->stock_lock ->&n->list_lock ->&rq->__lock ->&ppp->wlock ->pcpu_lock ->krc.lock ->&dir->lock#2 ->&ps->sk_lock ->&tunnel->list_lock ->&x->wait#2 ->&list->lock#29 FD: 1 BD: 5 +.-.: slock-AF_PPPOX FD: 1 BD: 167 +.+.: &head->lock FD: 102 BD: 183 +.+.: btrfs-treloc-01/1 ->mmu_notifier_invalidate_range_start 
->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&xa->xa_lock#9 ->key#25 ->&swapped_blocks->lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock FD: 1 BD: 3 ....: &tmr->lock FD: 1 BD: 6 ....: &mbus->lock FD: 1 BD: 1 ....: &rs->lock#3 FD: 207 BD: 1 +.+.: (wq_completion)hci2#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) FD: 451 BD: 2 +.+.: (work_completion)(&rdev->mgmt_registrations_update_wk) ->&rdev->wiphy.mtx FD: 1 BD: 68 ....: (&peer->timer_zero_key_material) FD: 1 BD: 67 +.+.: raw_notifier_lock FD: 1 BD: 67 +.+.: bcm_notifier_lock FD: 1 BD: 67 +.+.: isotp_notifier_lock FD: 14 BD: 8 ..-.: &cable->lock ->&obj_hash[i].lock ->&base->lock FD: 81 BD: 1 +.+.: (wq_completion)tc_filter_workqueue ->(work_completion)(&(rwork)->work) FD: 1 BD: 8 ....: (&usbhid->io_retry) FD: 1 BD: 3 ....: &hdev->debug_wait FD: 318 BD: 1 +.+.: bpf_stats_enabled_mutex ->&newf->file_lock ->fs_reclaim ->pool_lock#2 ->stock_lock ->&sb->s_type->i_lock_key#16 ->cpu_hotplug_lock FD: 80 BD: 67 +.+.: &chain->filter_chain_lock ->&block->lock ->&block->proto_destroy_lock FD: 1 BD: 9 ....: &TxBlock[k].waitor FD: 187 BD: 70 +.+.: team->team_lock_key#22 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->rcu_node_0 ->&rq->__lock ->&dev_addr_list_lock_key#8 ->&c->lock ->&dir->lock#2 ->input_pool.lock ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->&____s->seqcount#2 ->&____s->seqcount FD: 360 BD: 1 +.+.: &type->s_umount_key#74/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->bit_wait_table + i ->&n->list_lock ->&____s->seqcount#2 ->&xa->xa_lock#5 ->remove_cache_srcu ->&rcu_state.expedited_wq ->&sb->s_type->i_lock_key#45 ->(console_sem).lock ->&sbi->s_alloc_mutex ->&sbi->s_cred_lock ->&dentry->d_lock FD: 36 BD: 1 +.-.: (&lo->timer) ->&lo->lo_work_lock FD: 1 BD: 5 +.-.: x25_list_lock FD: 1 BD: 72 +...: &ifmgd->teardown_lock FD: 39 BD: 1 +.+.: (wq_completion)bond0#24 ->(work_completion)(&(&slave->notify_work)->work) FD: 38 BD: 2 +.+.: p9_poll_work ->p9_poll_lock ->&m->req_lock ->&req->wq FD: 445 BD: 2 +.+.: (work_completion)(&m->rq) ->&pipe->mutex ->tk_core.seq.seqcount ->&m->req_lock ->&req->wq FD: 1 BD: 37 ....: rlock-AF_BLUETOOTH FD: 1 BD: 67 +...: &sch->root_lock_key#789 FD: 1 BD: 85 ..-.: elock-AF_INET FD: 207 BD: 1 +.+.: (wq_completion)hci5#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) FD: 448 BD: 2 .+.+: sb_writers#19 ->&sb->s_type->i_mutex_key#26 ->&sem->wait_lock ->&p->pi_lock FD: 186 BD: 4 +.+.: &fi->i_gc_rwsem[WRITE] ->mapping.invalidate_lock#5 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#34 ->(work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 134 BD: 2 +.+.: &type->i_mutex_dir_key#17 ->rename_lock.seqcount ->fs_reclaim ->&rq->__lock ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&s->s_inode_list_lock ->&(imap->im_aglock[index]) ->inode_hash_lock ->tk_core.seq.seqcount ->jfsTxnLock ->&jfs_ip->commit_mutex ->&sb->s_type->i_lock_key#50 ->&n->list_lock ->&bmp->db_bmaplock ->lock#4 ->&mp->wait FD: 142 BD: 1 .+.+: sb_writers#25 ->mount_lock ->&type->i_mutex_dir_key#17 ->&type->i_mutex_dir_key#17/1 FD: 1 BD: 67 +...: &macsec_netdev_addr_lock_key#2/2 FD: 1093 BD: 15 +.+.: &devlink->lock_key#23 ->crngs.lock ->fs_reclaim ->&c->lock ->devlinks.xa_lock ->&xa->xa_lock#19 ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&(&fn_net->fib_chain)->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->stack_depot_init_mutex ->pool_lock#2 ->rtnl_mutex.wait_lock ->&p->pi_lock ->&devlink_port->type_lock ->&nsim_trap_data->trap_lock FD: 1 BD: 3 ....: &config->recv_wq FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg2#43 ->(work_completion)(&peer->transmit_handshake_work) FD: 5 BD: 1 +...: _xmit_X25#2 ->&lapbeth->up_lock FD: 1 BD: 2 +...: nr_node_list_lock FD: 5 BD: 1 +...: &nr_netdev_xmit_lock_key ->nr_node_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 5 BD: 68 +...: _xmit_SLIP#2 ->&eql->queue.lock FD: 34 BD: 1 +.-.: (&timer) ->&obj_hash[i].lock ->&base->lock ->&txlock ->&txwq FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#17 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 373 BD: 1 +.+.: &type->s_umount_key#83/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->nls_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&base->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->bit_wait_table + i ->&folio_wait_table[i] ->&mp->wait ->&n->list_lock ->jfsLCacheLock ->&lbuf->l_ioevent ->&wb->list_lock ->&wb->work_lock ->&(log)->loglock ->&jfs_ip->rdwrlock/1 ->&sb->s_type->i_lock_key#50 ->&dentry->d_lock ->(console_sem).lock ->&x->wait#23 FD: 118 BD: 1 +.+.: &type->s_umount_key#71 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#43 ->&dentry->d_lock/1 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu ->pool_lock#2 FD: 43 BD: 500 +.+.: &sb->s_type->i_lock_key#43 ->&dentry->d_lock FD: 359 BD: 1 +.+.: &type->s_umount_key#70/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 
->bdev_lock ->&disk->open_mutex ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&sb->map[i].swap_lock ->rcu_node_0 ->&obj_hash[i].lock ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount#2 ->&c->lock ->&xa->xa_lock#5 ->(console_sem).lock ->&sb->s_type->i_lock_key#43 ->&dentry->d_lock FD: 1 BD: 44 +.+.: &file->master_lookup_lock FD: 1 BD: 82 ..-.: key#37 FD: 158 BD: 19 +.+.: &type->i_mutex_dir_key#5/5 ->&xattrs->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&type->i_mutex_dir_key#5 ->rename_lock.seqcount ->stock_lock ->&dentry->d_lock ->&sb->s_type->i_lock_key ->&type->i_mutex_dir_key#5/2 FD: 1 BD: 74 +.+.: &map->owner.lock FD: 165 BD: 17 +.+.: &type->s_vfs_rename_key#2 ->&type->i_mutex_dir_key#5/1 ->&type->i_mutex_dir_key#5/5 FD: 170 BD: 178 +.+.: btrfs-extent-01#2/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&xa->xa_lock#9 ->key#25 ->lock#4 ->btrfs-extent-00 ->btrfs-extent-00/1 ->&fs_info->trans_lock ->&sb->s_type->i_lock_key#38 ->&wb->list_lock ->&wb->work_lock FD: 1 BD: 67 +...: &sch->root_lock_key#736 FD: 1 BD: 192 +.+.: btrfs-extent-00/2 FD: 1 BD: 3 ....: (&dev->timer) FD: 29 BD: 77 ....: &tfile->socket.wq.wait ->&p->pi_lock FD: 29 BD: 14 ....: &thread->wait ->&p->pi_lock ->pool_lock#2 FD: 324 BD: 4 ++++: &ovl_i_mutex_dir_key[depth]#2 ->rename_lock.seqcount ->&type->i_mutex_dir_key#5 ->&dentry->d_lock ->fs_reclaim ->pool_lock#2 ->stock_lock ->tomoyo_ss ->&sb->s_type->i_lock_key ->&c->lock ->&obj_hash[i].lock ->&ovl_i_lock_key[depth] ->&sb->s_type->i_lock_key#37 ->tk_core.seq.seqcount ->(console_sem).lock ->sb_writers#3 ->&sb->s_type->i_lock_key#45 ->&type->i_mutex_dir_key#15 ->&type->i_mutex_dir_key#3 ->&ei->xattr_sem ->inode_hash_lock ->mmu_notifier_invalidate_range_start ->smack_known_lock FD: 1 BD: 5 +.+.: &ff->mutex FD: 1 BD: 1 ....: &dev->event_lock#2 FD: 105 BD: 4 +.+.: hcd->address0_mutex ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->devtree_lock ->&dev->power.lock ->device_state_lock ->&bus->devnum_next_mutex ->mmu_notifier_invalidate_range_start ->ehci_cf_port_reset_rwsem ->(console_sem).lock ->console_owner_lock ->console_owner ->&dum_hcd->dum->lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->(&timer.timer) ->hcd_urb_list_lock ->&____s->seqcount ->quirk_mutex ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 30 BD: 22 -...: &queue->lock ->pool_lock#2 ->semaphore->lock#5 ->&c->lock ->&n->list_lock FD: 3 BD: 140 +.+.: (work_completion)(flush) ->&list->lock#12 ->process_queue_bh_lock FD: 76 BD: 1 +.-.: (&msk->sk.icsk_retransmit_timer) ->slock-AF_INET#2 FD: 43 BD: 4 +.+.: &sbi->lookup_lock ->&dentry->d_lock FD: 1 BD: 4 +...: raw_lock FD: 5 BD: 2 +.+.: (work_completion)(&ruleset->work_free) ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg1#33 ->(work_completion)(&peer->transmit_handshake_work) FD: 37 BD: 4 +.+.: sk_lock-AF_CAIF ->slock-AF_CAIF ->&obj_hash[i].lock ->&this->info_list_lock ->(console_sem).lock ->&x->wait#2 ->&rq->__lock ->&ei->socket.wq.wait ->clock-AF_CAIF FD: 7 BD: 1 +.+.: put_task_map-wait-type-override#3 ->&obj_hash[i].lock ->percpu_counters_lock ->pcpu_lock ->pool_lock#2 ->stock_lock FD: 137 BD: 174 +.+.: btrfs-log-01#2/1 ->mmu_notifier_invalidate_range_start ->&tree->lock 
->&fs_info->block_group_cache_lock ->&space_info->lock ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&xa->xa_lock#9 ->btrfs-log-00 ->btrfs-log-00/1 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 138 BD: 173 ++++: btrfs-log-01#2 ->&eb->refs_lock ->key#25 ->&rsv->lock ->&space_info->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-log-01#2/1 ->btrfs-log-00 ->btrfs-log-00/1 ->&c->lock ->&____s->seqcount#2 ->&rq->__lock FD: 98 BD: 177 +.+.: btrfs-log-00/6 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->&eb->refs_lock FD: 1 BD: 5 ....: rlock-AF_KCM FD: 1 BD: 5 +...: clock-AF_KCM FD: 1 BD: 5 +...: slock-AF_KCM FD: 3 BD: 4 +.+.: sk_lock-AF_KCM ->slock-AF_KCM ->clock-AF_KCM FD: 2 BD: 4 +...: &mux->rx_lock ->rlock-AF_KCM FD: 1 BD: 4 +...: &mux->lock FD: 134 BD: 176 +.+.: btrfs-log-01/7 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->key#25 ->&eb->refs_lock ->&rsv->lock ->&space_info->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->&____s->seqcount ->stock_lock ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-log-00/6 FD: 1 BD: 5 +...: slock-AF_CAIF FD: 1 BD: 4 +...: rlock-AF_CAIF FD: 137 BD: 192 +.+.: btrfs-extent-01/7 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&____s->seqcount ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->&eb->refs_lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->lock#4 ->&mapping->i_private_lock ->lock ->btrfs-extent-00/6 FD: 1 BD: 4 +.+.: &knet->mutex FD: 1 BD: 5 +...: clock-AF_CAIF FD: 171 BD: 177 ++++: btrfs-extent-01#2 ->&rq->__lock ->btrfs-extent-00 ->lock#4 ->&eb->refs_lock ->key#25 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->&xa->xa_lock#9 ->&mapping->i_private_lock ->lock ->btrfs-extent-01#2/1 ->btrfs-extent-00/1 ->stock_lock ->btrfs-extent-00/6 FD: 87 BD: 17 +.+.: lock#10 ->inode_hash_lock FD: 193 BD: 7 .+.+: mapping.invalidate_lock#4 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->&tree->lock ->&ei->ordered_tree_lock ->&tree->lock#2 ->&c->lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&rq->__lock ->rcu_node_0 ->remove_cache_srcu ->btrfs-tree-00 ->&eb->refs_lock ->btrfs-tree-01 ->&fs_info->mapping_tree_lock ->&fs_info->dev_replace.rwsem ->&fs_info->global_root_lock ->btrfs-csum-00 ->tk_core.seq.seqcount FD: 1 BD: 1 +.+.: &newdev->requests_lock FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#44 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 28 BD: 1 .+.+: drm_unplug_srcu ->&rq->__lock FD: 1 BD: 167 ....: &cur_trans->pending_wait FD: 135 BD: 183 +.+.: btrfs-tree-00/2 ->&rsv->lock ->&____s->seqcount#12 
->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-tree-00/4 FD: 29 BD: 167 ....: &cur_trans->writer_wait ->&p->pi_lock FD: 102 BD: 183 +.+.: btrfs-tree-00/6 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->key#25 ->&eb->refs_lock FD: 1 BD: 173 ..-.: key#36 FD: 102 BD: 184 +.+.: btrfs-tree-00/4 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&c->lock ->&eb->refs_lock ->&fs_info->buffer_lock ->&mapping->i_private_lock ->&obj_hash[i].lock ->&xa->xa_lock#9 FD: 102 BD: 193 +.+.: btrfs-extent-00/6 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&root->accounting_lock ->&xa->xa_lock#9 ->key#25 ->&eb->refs_lock FD: 1 BD: 5 +.+.: &pnsocks.lock FD: 34 BD: 4 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->sco_sk_list.lock ->&ei->socket.wq.wait FD: 1 BD: 6 +.+.: sco_sk_list.lock FD: 1 BD: 3 ....: &tags->lock FD: 35 BD: 1 ..-.: &(&pool->release_dw)->timer FD: 207 BD: 1 +.+.: (wq_completion)hci4#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) FD: 140 BD: 2 +.+.: &type->i_mutex_dir_key#17/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->pool_lock#2 ->&mp->wait ->&obj_hash[i].lock ->tomoyo_ss ->&s->s_inode_list_lock ->&bmp->db_bmaplock ->&(imap->im_aglock[index]) ->inode_hash_lock ->tk_core.seq.seqcount ->jfsTxnLock ->&jfs_ip->commit_mutex ->&sb->s_type->i_lock_key#50 ->&____s->seqcount#2 ->&____s->seqcount ->&jfs_ip->rdwrlock/1 ->lock#4 ->&jfs_ip->xattr_sem ->rcu_node_0 ->&rq->__lock ->&sb->s_type->i_mutex_key#33/4 FD: 283 BD: 3 +.+.: &type->i_mutex_dir_key#10/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->stock_lock ->&dentry->d_lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->btrfs-tree-01 ->btrfs-tree-00 ->&eb->refs_lock ->&obj_hash[i].lock ->tomoyo_ss ->&s->s_inode_list_lock ->&fs_info->qgroup_lock ->&root->qgroup_meta_rsv_lock ->&space_info->lock ->&rsv->lock ->&____s->seqcount ->sb_internal#4 ->btrfs_trans_num_writers FD: 1 BD: 4 ....: rds_cong_monitor_lock FD: 1 BD: 67 +...: &sch->root_lock_key#774 FD: 119 BD: 1 +.+.: &type->s_umount_key#72/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->sb_lock ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#44 ->&dentry->d_lock FD: 5 BD: 1 ....: &trie->lock ->stock_lock ->pool_lock#2 ->&zone->lock ->&____s->seqcount FD: 43 BD: 365 +.+.: &sb->s_type->i_lock_key#44 ->&dentry->d_lock FD: 1088 BD: 2 +.+.: (crda_timeout).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 35 BD: 1 ..-.: net/wireless/reg.c:535 FD: 1 BD: 301 ....: mnt_group_ida.xa_lock FD: 144 BD: 13 ++++: &sb->s_type->i_mutex_key#27 ->namespace_sem ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->stock_lock ->&dentry->d_lock ->tomoyo_ss ->pool_lock#2 
->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#44 ->&wb->list_lock FD: 1 BD: 214 +.+.: release_agent_path_lock FD: 1 BD: 185 +.+.: &sbi->s_mb_largest_free_orders_locks[i] FD: 1 BD: 185 +.+.: &sbi->s_mb_avg_fragment_size_locks[i] FD: 1 BD: 6 ....: (&tw->tw_timer) FD: 110 BD: 2 ++++: &type->s_umount_key#73 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#44 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&dentry->d_lock/1 FD: 1 BD: 4 +...: clock-AF_KEY FD: 1 BD: 1 ....: &____s->seqcount#13 FD: 1 BD: 1 +.+.: capidev_list_lock FD: 445 BD: 2 +.+.: (work_completion)(&m->wq) ->&m->req_lock ->&pipe->mutex ->&pipe->rd_wait FD: 1 BD: 67 +...: &sch->root_lock_key#339 FD: 1 BD: 14 ....: disc_data_lock#2 FD: 1 BD: 67 +...: &sch->root_lock_key#327 FD: 29 BD: 69 ....: &sw_ctx_rx->wq ->&p->pi_lock FD: 318 BD: 2 +.+.: cpool_cleanup_work ->cpool_mutex FD: 458 BD: 1 +.+.: (wq_completion)loop3 ->(work_completion)(&worker->work) ->(work_completion)(&lo->rootcg_work) FD: 1 BD: 5 +...: slock-AF_X25 FD: 1 BD: 3 +.+.: dquirks_lock FD: 35 BD: 1 ..-.: net/ipv4/tcp_ipv4.c:1226 FD: 1 BD: 1 +.+.: &pstr->oss.setup_mutex FD: 175 BD: 12 +.+.: &sb->s_type->i_mutex_key#8/4 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&ei->i_es_lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&s->s_inode_list_lock ->dq_list_lock ->&dquot->dq_lock ->dq_data_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->dquot_srcu ->&ei->xattr_sem ->&ei->i_raw_lock ->&sb->s_type->i_lock_key#23 ->rename_lock ->&n->list_lock ->&rq->__lock FD: 3 BD: 4 +.+.: subsys mutex#77 ->&k->k_lock FD: 1 BD: 3 +.+.: subsys mutex#78 FD: 92 BD: 7 +.+.: &usbhid->mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&n->list_lock ->&dev->power.lock ->&usbhid->lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->(&usbhid->io_retry) ->hcd_urb_unlink_lock ->&dum_hcd->dum->lock ->usb_kill_urb_queue.lock FD: 196 BD: 3 +.+.: minors_rwsem ->fs_reclaim ->pool_lock#2 ->&x->wait#8 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#77 ->&hdev->ll_open_lock ->&dev->list_lock ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&n->list_lock FD: 1 BD: 3 +.+.: &hdrv->dyn_lock FD: 1 BD: 3 ....: semaphore->lock#6 FD: 1 BD: 4 ....: &dev->list_lock FD: 72 BD: 172 +.+.: &dqp->q_qlock/1 ->pool_lock#2 ->&rq->__lock ->&cil->xc_ctx_lock ->&c->lock FD: 29 BD: 23 -...: semaphore->lock#5 ->&p->pi_lock FD: 55 BD: 1 +.+.: &udc->connect_lock ->&dum_hcd->dum->lock ->&rq->__lock ->hcd_root_hub_lock ->&queue->lock ->udc_lock FD: 73 BD: 167 +.+.: &xfs_nondir_ilock_class#3/1 ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&iip->ili_lock ->&cil->xc_ctx_lock FD: 1 BD: 5 +.+.: &bus->devnum_next_mutex FD: 93 BD: 2 +.+.: (work_completion)(&hcd->wakeup_work) FD: 44 BD: 1 +.-.: (&hcd->rh_timer) ->&dum_hcd->dum->lock FD: 31 BD: 4 +.+.: sk_lock-AF_X25 ->&rq->__lock ->slock-AF_X25 ->&obj_hash[i].lock ->x25_list_lock ->rlock-AF_X25 FD: 134 BD: 2 ++++: &type->s_umount_key#75 
->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq ->&sbi->s_alloc_mutex ->&sb->s_type->i_lock_key#3 ->&xa->xa_lock#9 ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->lock#4 ->lock#5 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#45 ->&s->s_inode_list_lock ->&mapping->i_private_lock ->&ei->i_extent_cache_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&base->lock ->bit_wait_table + i FD: 39 BD: 2 +.+.: key_gc_work ->key_serial_lock ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock ->keyring_name_lock ->pool_lock#2 ->root_key_user.lock FD: 1 BD: 2 ....: hugetlb_lock FD: 1 BD: 1 ....: printk_limits[4].lock FD: 31 BD: 1 +.-.: (&timer->tl) ->&dev->lock#2 FD: 128 BD: 1 +.+.: &type->s_umount_key#79 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#47 ->&dentry->d_lock/1 ->&sbi->s_lock#2 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&wb->work_lock ->pool_lock#2 FD: 1 BD: 1 ....: _rs.lock#7 FD: 6 BD: 219 +.+.: &cluster->refill_lock ->&ctl->tree_lock ->&cluster->lock FD: 208 BD: 153 .+.+: sb_pagefaults#3 ->&rq->__lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#35 ->fs_reclaim ->pool_lock#2 ->sb_internal#2 ->&obj_hash[i].lock ->mapping.invalidate_lock#3 FD: 1 BD: 15 ..-.: &log->free_wait FD: 128 BD: 1 +.+.: &sb->s_type->i_mutex_key#32 ->namespace_sem FD: 1 BD: 2 +.+.: &sbi->s_lock#2 FD: 11 BD: 67 +...: &p->tcfa_lock ->&(to_police(*a)->tcfp_lock) FD: 4 BD: 68 +...: &(to_police(*a)->tcfp_lock) ->tk_core.seq.seqcount FD: 1 BD: 67 +.+.: &r->consumer_lock#4 FD: 78 BD: 67 +.+.: &tn->idrinfo->lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 556 BD: 1 +.+.: (wq_completion)hci2 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 4 +.+.: &ovl_i_mutex_key[depth]/4 ->&dentry->d_lock ->tomoyo_ss ->&ovl_i_lock_key[depth] FD: 24 BD: 2 +.+.: (work_completion)(&ns->work) ->sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock ->proc_inum_ida.xa_lock ->stock_lock FD: 39 BD: 1 +.+.: (wq_completion)bond0#22 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 1 BD: 67 +...: &sch->root_lock_key#812 FD: 1 BD: 2 +.+.: &sbi->wq_mutex FD: 35 BD: 13 ..-.: &(log)->gclock ->jfsLCacheLock ->&TxAnchor.LazyLock ->&TxBlock[k].gcwait FD: 112 BD: 1 +.+.: &type->s_umount_key#77 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->&sbi->wq_mutex ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#46 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->pool_lock#2 ->&fsnotify_mark_srcu ->&sbi->lookup_lock ->krc.lock ->&rq->__lock ->&dentry->d_lock/1 FD: 45 BD: 13 +.+.: &sb->s_type->i_mutex_key#27/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock FD: 143 BD: 10 +.+.: &type->s_vfs_rename_key#5 ->&sb->s_type->i_mutex_key#27/1 ->&sb->s_type->i_mutex_key#27/5 FD: 156 BD: 12 +.+.: &sb->s_type->i_mutex_key#27/5 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->stock_lock ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#44 ->&sb->s_type->i_mutex_key#27 ->&sb->s_type->i_mutex_key#27/4 ->&sb->s_type->i_lock_key#37 ->&fsnotify_mark_srcu ->&xa->xa_lock#9 ->&obj_hash[i].lock FD: 1 BD: 4 +...: 
clock-AF_IEEE802154 FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#17 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg2#39 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 67 +...: &sl->lock FD: 1 BD: 1 ....: &head->lock#2 FD: 1 BD: 5 +.+.: &card->memory_mutex FD: 34 BD: 6 ..-.: &group->lock#2 ->tk_core.seq.seqcount ->&timer->lock ->&runtime->sleep ->&runtime->tsleep ->&card->ctl_files_rwlock ->&cable->lock FD: 1 BD: 7 ..-.: &runtime->tsleep FD: 29 BD: 7 ..-.: &runtime->sleep ->&p->pi_lock FD: 89 BD: 1 +.+.: nlk_cb_mutex-SOCK_DIAG ->fs_reclaim ->&c->lock ->pool_lock#2 ->&rq->__lock ->&net->packet.sklist_lock FD: 1 BD: 5 ....: rlock-AF_X25 FD: 84 BD: 185 ++++: &s->s_dquot.dqio_sem ->&ei->i_es_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->lock#4 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->&dquot->dq_dqb_lock ->&c->lock ->&n->list_lock ->key#34 ->&ei->i_raw_lock ->dq_data_lock ->&ei->i_data_sem/2 ->remove_cache_srcu FD: 45 BD: 186 ++++: &ei->i_data_sem/2 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&ei->i_raw_lock ->&c->lock FD: 1 BD: 185 +.+.: dq_state_lock FD: 61 BD: 506 +.+.: dq_data_lock ->&sb->s_type->i_lock_key#23 FD: 85 BD: 184 +.+.: &dquot->dq_lock ->&s->s_dquot.dqio_sem FD: 47 BD: 4 +.+.: &sb->s_type->i_mutex_key#4/4 ->&simple_offset_lock_class ->tk_core.seq.seqcount ->&dentry->d_lock ->rename_lock FD: 1 BD: 16 +.+.: &keyring->lock FD: 87 BD: 12 +.+.: fscrypt_add_key_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->key_user_lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->root_key_user.lock ->&type->lock_class ->&keyring->lock FD: 86 BD: 185 .+.+: dquot_srcu ->&sb->s_type->i_lock_key#23 ->&dquot->dq_lock ->&rq->__lock FD: 231 BD: 12 +.+.: &type->i_mutex_dir_key#3/5 ->&ei->xattr_sem ->fs_reclaim ->pool_lock#2 ->&type->i_mutex_dir_key#3 ->&obj_hash[i].lock ->rename_lock.seqcount ->stock_lock ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&type->i_mutex_dir_key#3/2 FD: 261 BD: 10 +.+.: &type->s_vfs_rename_key#3 ->&type->i_mutex_dir_key#3/1 ->&type->i_mutex_dir_key#3/5 FD: 43 BD: 581 +.+.: &sb->s_type->i_lock_key#45 ->&dentry->d_lock FD: 113 BD: 14 +.+.: &ei->i_data_sem#2 ->&ei->i_extent_cache_lock ->&sbi->s_alloc_mutex ->&sb->s_type->i_lock_key#45 ->tk_core.seq.seqcount ->&mapping->i_mmap_rwsem FD: 1 BD: 14 .+.+: &sbi->s_cred_lock FD: 82 BD: 18 +.+.: &sbi->s_alloc_mutex ->tk_core.seq.seqcount ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&wb->work_lock ->&sb->s_type->i_lock_key#45 ->&rq->__lock FD: 92 BD: 5 +.+.: (quota_release_work).work ->dq_list_lock ->dquot_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->dquot_srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#9 ->&dquot->dq_lock FD: 1 BD: 4 ....: fs/quota/dquot.c:273 FD: 35 BD: 10 ....: dquot_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 175 BD: 9 .+.+: sb_writers#20 ->mount_lock ->&type->i_mutex_dir_key#13/1 ->inode_hash_lock ->fs_reclaim ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#37 
->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->pool_lock#2 ->&n->list_lock ->&dentry->d_lock ->tomoyo_ss ->&type->i_mutex_dir_key#15 ->(console_sem).lock ->&sb->s_type->i_lock_key#45 ->&s->s_inode_list_lock ->&sbi->s_alloc_mutex ->tk_core.seq.seqcount ->&wb->list_lock ->&type->s_vfs_rename_key#4 ->&mapping->i_mmap_rwsem ->&ei->i_data_sem#2 ->&xa->xa_lock#9 ->&obj_hash[i].lock ->&ei->i_extent_cache_lock ->&fsnotify_mark_srcu FD: 161 BD: 11 +.+.: &type->i_mutex_dir_key#13/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->pool_lock#2 ->&c->lock ->&ei->i_extent_cache_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->&obj_hash[i].lock ->tomoyo_ss ->&s->s_inode_list_lock ->&sbi->s_alloc_mutex ->inode_hash_lock ->&sb->s_type->i_lock_key#45 ->&wb->list_lock ->&ei->i_data_sem#2 ->&type->i_mutex_dir_key#14/5 ->&n->list_lock ->&type->i_mutex_dir_key#15 ->&sb->s_type->i_mutex_key#28/4 ->&sb->s_type->i_mutex_key#29 ->&fsnotify_mark_srcu FD: 1 BD: 18 +.+.: &ei->i_extent_cache_lock FD: 131 BD: 12 +.+.: &type->i_mutex_dir_key#14/5 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->stock_lock ->&dentry->d_lock ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->&sbi->s_alloc_mutex ->tk_core.seq.seqcount ->inode_hash_lock ->&sb->s_type->i_lock_key#45 ->&wb->list_lock ->&sb->s_type->i_mutex_key#29 ->&sb->s_type->i_mutex_key#28/4 ->&sb->s_type->i_lock_key#37 FD: 162 BD: 10 +.+.: &type->s_vfs_rename_key#4 ->&type->i_mutex_dir_key#13/1 ->&type->i_mutex_dir_key#14/5 FD: 92 BD: 13 +.+.: &sb->s_type->i_mutex_key#28/4 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->tk_core.seq.seqcount ->&n->list_lock ->rename_lock ->&rq->__lock FD: 175 BD: 12 ++++: &type->i_mutex_dir_key#15 ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->namespace_sem ->stock_lock ->&dentry->d_lock ->&c->lock ->&rq->__lock ->&ei->i_extent_cache_lock ->mmu_notifier_invalidate_range_start ->inode_hash_lock ->&mapping->i_private_lock ->bit_wait_table + i ->&sbi->s_cred_lock ->&sb->s_type->i_lock_key#45 ->rcu_node_0 ->(console_sem).lock ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&n->list_lock FD: 116 BD: 13 +.+.: &sb->s_type->i_mutex_key#29 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&sbi->s_alloc_mutex ->&dentry->d_lock ->&ei->i_data_sem#2 ->&rq->__lock ->&sb->s_type->i_lock_key#45 ->&wb->list_lock FD: 4 BD: 225 +.+.: &cluster->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 69 ..-.: &list->lock#27 FD: 451 BD: 2 +.+.: (work_completion)(&rdev->conn_work) ->&rdev->wiphy.mtx FD: 19 BD: 76 +...: &dev_addr_list_lock_key#16 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 29 BD: 4 ..-.: &dev->wait#2 ->&p->pi_lock FD: 30 BD: 3 ..-.: &dev->lock#2 ->&obj_hash[i].lock ->&base->lock ->&dev->wait#2 FD: 441 BD: 1 +.+.: mISDN_mutex ->&mm->mmap_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->&dev->lock#2 FD: 81 BD: 1 .+.+: kn->active#52 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 1096 BD: 1 +.+.: (wq_completion)smc_hs_wq ->(work_completion)(&smc->connect_work) FD: 1 BD: 5 +...: slock-AF_BLUETOOTH-BTPROTO_SCO FD: 1 BD: 67 +...: &sch->root_lock_key#336 FD: 1095 BD: 2 +.+.: (work_completion)(&smc->connect_work) ->k-sk_lock-AF_INET ->k-slock-AF_INET#2 ->sk_lock-AF_SMC ->slock-AF_SMC 
FD: 1 BD: 67 +...: &sch->root_lock_key#791 FD: 324 BD: 3 +.+.: &sqd->lock ->&____s->seqcount#4 ->&p->pi_lock ->&rq->__lock ->&prev->lock ->&p->alloc_lock ->&sqd->wait ->key#29 ->&x->wait#37 ->&hash->wait ->cpu_hotplug_lock ->&acct->lock ->&obj_hash[i].lock ->pool_lock#2 ->percpu_counters_lock ->pcpu_lock FD: 94 BD: 1 +.+.: crypto_cfg_mutex ->&rq->__lock ->(console_sem).lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->rcu_node_0 ->rlock-AF_NETLINK FD: 1 BD: 67 +...: &sch->root_lock_key#792 FD: 556 BD: 1 +.+.: (wq_completion)hci4 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 39 BD: 1 +.+.: (wq_completion)bond0#17 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 1 BD: 4 +...: rds_sock_lock FD: 1 BD: 5 +...: slock-AF_PHONET FD: 1 BD: 4 +...: l2tp_ip6_lock FD: 1 BD: 13 ....: &mp->wait FD: 1 BD: 3 ....: &x->wait#48 FD: 1 BD: 5 ....: &list->lock#29 FD: 1 BD: 67 +...: &sch->root_lock_key#777 FD: 1 BD: 67 +...: &sch->root_lock_key#764 FD: 1 BD: 67 +...: &sch->root_lock_key#766 FD: 1 BD: 4 +.+.: pfkey_mutex FD: 1 BD: 4 +...: clock-AF_RXRPC FD: 1 BD: 1 +...: &r->producer_lock#4 FD: 1 BD: 67 +...: &sch->root_lock_key#763 FD: 1 BD: 67 +...: &sch->root_lock_key#779 FD: 1 BD: 4 ....: &sqd->wait FD: 31 BD: 14 ..-.: jfsLCacheLock ->&obj_hash[i].lock ->&lbuf->l_ioevent ->&log->free_wait FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#17 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 59 BD: 584 +.+.: &sb->s_type->i_lock_key#50 ->&dentry->d_lock ->&xa->xa_lock#9 FD: 1 BD: 11 ..-.: &(log)->synclock FD: 1 BD: 12 +.+.: &bmp->db_bmaplock FD: 1 BD: 2 ....: &jfs_ip->ag_lock FD: 1 BD: 9 ....: &TxAnchor.freelockwait FD: 94 BD: 11 ++++: &jfs_ip->rdwrlock#2/2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->pool_lock#2 ->tk_core.seq.seqcount ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->&bmp->db_bmaplock ->&mp->wait ->&sb->s_type->i_lock_key#50 ->&wb->list_lock FD: 33 BD: 4 +.+.: sk_lock-AF_PHONET ->slock-AF_PHONET ->&pnsocks.lock ->resource_mutex ->&obj_hash[i].lock ->&x->wait#2 ->&rq->__lock FD: 1 BD: 1 ....: &TxAnchor.freewait FD: 1 BD: 67 +...: &sch->root_lock_key#762 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#44 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&rq->__lock FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg0#43 ->(work_completion)(&peer->transmit_handshake_work) FD: 5 BD: 1 +.+.: put_task_map-wait-type-override#4 ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg1#43 
->(work_completion)(&peer->transmit_handshake_work) FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg0#33 ->(work_completion)(&peer->transmit_handshake_work) FD: 48 BD: 1 .+.+: sb_writers#27 ->mount_lock FD: 49 BD: 1 +...: &dev_addr_list_lock_key#7/2 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#8 FD: 1 BD: 67 +.+.: ifalias_mutex FD: 10 BD: 12 +...: &tunnel->list_lock ->&pn->l2tp_session_idr_lock FD: 1 BD: 2 +.+.: &sbi->s_fc_lock FD: 1 BD: 9 +.+.: unix_gc_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg2#33 ->(work_completion)(&peer->transmit_handshake_work) FD: 36 BD: 1 +.-.: (&dwork->timer)#4 ->&rdev->wiphy_work_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg1#41 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#42 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 80 BD: 2 +.+.: nlk_cb_mutex-NETFILTER ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->ip_set_ref_lock ->rlock-AF_NETLINK FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 29 BD: 1 +.+.: &new_ruleset->lock ->&rq->__lock ->&new_ruleset->lock/1 FD: 1 BD: 2 +.+.: &new_ruleset->lock/1 FD: 7 BD: 151 +.+.: &cache->alloc_lock ->swap_avail_lock ->&p->lock#2 FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg2#41 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 3 +...: ip_set_ref_lock FD: 1 BD: 1 ....: _rs.lock#8 FD: 1 BD: 1 ....: _rs.lock#9 FD: 1 BD: 1 +.+.: cuse_lock FD: 138 BD: 172 ++++: btrfs-uuid-00 ->&fs_info->fs_roots_radix_lock ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-uuid-00/1 ->key#25 FD: 1 BD: 6 +.+.: &ping_table.lock FD: 2 BD: 1 .+.+: &iopt->domains_rwsem ->&iopt->iova_rwsem FD: 1 BD: 183 +.+.: &wsm->ws_lock FD: 1 BD: 183 +...: &wsm.lock FD: 1 BD: 2 +.+.: &compr_pool.lock FD: 1 BD: 734 +.+.: &((cluster_info + ci)->lock)#2 FD: 9 BD: 151 ....: &xa->xa_lock#27 ->&ctrl->lock#2 ->&c->lock ->pool_lock#2 FD: 1 BD: 730 +.+.: mmlist_lock FD: 16 BD: 151 +.+.: &acomp_ctx->mutex ->scomp_scratch.lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 ->&class->lock ->&pool->migrate_lock ->&zspage->lock FD: 4 BD: 732 +.+.: &class->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 152 +.+.: scomp_scratch.lock FD: 2 BD: 732 .+.+: &zspage->lock ->lock#11 FD: 7 BD: 731 .+.+: &pool->migrate_lock ->&zspage->lock ->&class->lock FD: 1 BD: 733 +.+.: lock#11 FD: 10 BD: 722 +.+.: &xa->xa_lock#28 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock FD: 1 BD: 152 ....: &ctrl->lock#2 FD: 1 BD: 152 ....: key#39 FD: 5 BD: 1 +.+.: &xa->xa_lock#29 ->pool_lock#2 ->stock_lock ->&obj_hash[i].lock FD: 1 BD: 730 ....: &cache->free_lock FD: 81 BD: 1 .+.+: kn->active#53 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] 
FD: 4 BD: 2 +.-.: icmp_global.lock ->batched_entropy_u8.lock FD: 439 BD: 2 .+.+: sb_writers#24 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&obj_hash[i].lock FD: 172 BD: 182 +.+.: btrfs-treloc-01 ->&rsv->lock ->&____s->seqcount#12 ->&fs_info->balance_lock ->&fs_info->block_group_cache_lock ->&space_info->groups_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->lock ->&eb->refs_lock ->btrfs-treloc-01/1 ->&swapped_blocks->lock ->&cur_trans->delayed_refs.lock ->&c->lock ->&obj_hash[i].lock ->key#25 ->&fs_info->global_root_lock ->btrfs-extent-00 FD: 1 BD: 1 ....: &ps->lock FD: 1 BD: 67 +...: &sch->root_lock_key#778 FD: 1 BD: 67 +...: &sch->root_lock_key#780 FD: 99 BD: 1 +.+.: sk_lock-AF_RDS ->slock-AF_RDS ->rds_trans_sem ->batched_entropy_u16.lock ->once_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 2 +...: slock-AF_RDS FD: 1 BD: 67 +...: &sch->root_lock_key#737 FD: 80 BD: 68 +.+.: &data->nh_lock ->fs_reclaim ->pool_lock#2 ->rcu_node_0 ->&obj_hash[i].lock FD: 114 BD: 3 +.+.: &sb->s_type->i_mutex_key#33/4 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mp->wait ->&obj_hash[i].lock ->jfsTxnLock ->&jfs_ip->commit_mutex ->rename_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg0#39 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 67 +...: &sch->root_lock_key#784 FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg1#39 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 67 +...: &sch->root_lock_key#785 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#40 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 1 BD: 4 ..-.: mem_id_pool.xa_lock FD: 83 BD: 3 +.+.: mem_id_lock ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->mem_id_pool.xa_lock ->&ht->lock FD: 95 BD: 1 +.+.: mf_mutex ->hugetlb_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->&c->lock ->pool_lock#2 ->&n->list_lock ->&mapping->i_mmap_rwsem ->lock#2 ->&lruvec->lru_lock ->(console_sem).lock FD: 1 BD: 22 +.+.: &sbinfo->shrinklist_lock FD: 460 BD: 1 .+.+: &clk->rwsem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&ptp->tsevqs_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&mm->mmap_lock FD: 1 BD: 2 ....: &ptp->tsevqs_lock FD: 59 BD: 581 +.+.: &sb->s_type->i_lock_key#49 ->&xa->xa_lock#9 ->&dentry->d_lock FD: 370 BD: 1 +.+.: &type->s_umount_key#82/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->&c->lock ->bdev_lock ->&disk->open_mutex ->&n->list_lock ->remove_cache_srcu ->&obj_hash[i].lock ->nls_lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&x->wait#25 ->&base->lock ->&rq->__lock ->(&timer.timer) ->&____s->seqcount#2 ->&xa->xa_lock#5 ->&sb->s_type->i_lock_key#49 ->&xa->xa_lock#9 ->lock#4 ->&mapping->i_private_lock ->&folio_wait_table[i] ->&s->s_inode_list_lock ->lock#5 ->&lruvec->lru_lock ->&fsnotify_mark_srcu ->(console_sem).lock ->&x->wait#23 ->rcu_node_0 ->&sb->map[i].swap_lock ->&tree->tree_lock#2 ->&sbi->vh_mutex ->&dentry->d_lock ->&fq->mq_flush_lock FD: 1 BD: 67 +...: &sch->root_lock_key#337 FD: 1 BD: 67 +...: &sch->root_lock_key#341 FD: 187 BD: 70 +.+.: team->team_lock_key#21 
->fs_reclaim ->&c->lock ->netpoll_srcu ->net_rwsem ->&tn->lock ->&dev_addr_list_lock_key#8 ->&dir->lock#2 ->input_pool.lock ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->rcu_node_0 ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 67 +...: &sch->root_lock_key#330 FD: 1 BD: 67 +...: &sch->root_lock_key#329 FD: 1 BD: 67 +...: &sch->root_lock_key#328 FD: 29 BD: 8 ....: &x->wait#49 ->&p->pi_lock FD: 30 BD: 7 +.+.: &io->lock ->&x->wait#49 FD: 160 BD: 1 +.+.: &type->s_umount_key#81 ->&rq->__lock ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#48 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->inode_hash_lock ->pool_lock#2 ->&fsnotify_mark_srcu ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->lock ->&m->req_lock ->&clnt->lock ->&dentry->d_lock/1 ->&bdi->wb_switch_rwsem ->&s->s_sync_lock FD: 1 BD: 67 +...: &sch->root_lock_key#772 FD: 1 BD: 4 ....: &q->lock FD: 1 BD: 4 ....: &rs->rs_rdma_lock FD: 1 BD: 4 ....: &rs->rs_lock FD: 1 BD: 4 ....: rds_cong_lock FD: 1 BD: 67 +...: &sch->root_lock_key#761 FD: 1 BD: 67 +...: &sch->root_lock_key#776 FD: 1 BD: 67 +...: &sch->root_lock_key#765 FD: 1 BD: 67 +...: &sch->root_lock_key#771 FD: 1 BD: 67 +...: &sch->root_lock_key#790 FD: 39 BD: 1 +.+.: (wq_completion)bond0#23 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 67 +...: &sch->root_lock_key#794 FD: 1 BD: 67 +...: &sch->root_lock_key#795 FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 67 +...: &sch->root_lock_key#796 FD: 1 BD: 67 +...: &sch->root_lock_key#797 FD: 1 BD: 67 +...: &sch->root_lock_key#798 FD: 1 BD: 67 +...: &sch->root_lock_key#799 FD: 1 BD: 67 +...: &sch->root_lock_key#800 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#40 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 102 BD: 173 +.+.: btrfs-uuid-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 FD: 1 BD: 171 +.+.: &rc->reloc_root_tree.lock FD: 99 BD: 171 +.+.: btrfs-tree-01/7 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&xa->xa_lock#9 FD: 1 BD: 219 +.+.: 
&fs_info->relocation_bg_lock FD: 103 BD: 185 +.+.: btrfs-dreloc-00/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&tree->lock ->&cur_trans->delayed_refs.lock ->&rsv->lock ->&fs_info->trans_lock ->&eb->refs_lock ->&xa->xa_lock#9 ->key#25 FD: 1 BD: 4 +.+.: &fs_info->swapfile_pins_lock FD: 43 BD: 502 +.+.: &sb->s_type->i_lock_key#51 ->&dentry->d_lock FD: 1 BD: 1 ....: _rs.lock#10 FD: 104 BD: 1 .+.+: &type->i_mutex_dir_key#19 ->&rq->__lock ->fs_reclaim ->stock_lock ->pool_lock#2 ->&dentry->d_lock ->&mapping->i_private_lock ->mmu_notifier_invalidate_range_start ->lock#4 ->&sb->s_type->i_lock_key#51 ->rename_lock ->(console_sem).lock ->&c->lock ->&n->list_lock FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#42 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 556 BD: 1 +.+.: (wq_completion)hci5 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#21 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 67 +...: &sch->root_lock_key#810 FD: 89 BD: 153 .+.+: sb_pagefaults#2 ->tk_core.seq.seqcount ->mount_lock ->&sb->s_type->i_lock_key#6 ->&wb->list_lock ->&mapping->i_private_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&wb->work_lock ->&rq->__lock FD: 40 BD: 2 +.+.: (work_completion)(&hu->write_work) ->&hu->proto_lock ->&port->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 358 BD: 1 +.+.: &type->s_umount_key#86/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_mutex ->&c->lock ->list_lrus_mutex ->sb_lock ->inode_hash_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->bdev_lock ->&disk->open_mutex ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#9 ->&mapping->i_private_lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&rq->__lock ->(console_sem).lock ->&____s->seqcount#2 ->&xa->xa_lock#5 ->&sb->s_type->i_lock_key#51 ->&dentry->d_lock FD: 9 BD: 85 +.--: &____s->seqcount#14 ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->pool_lock#2 FD: 10 BD: 84 +.-.: &(&bp->lock)->lock ->&____s->seqcount#14 FD: 451 BD: 1 +.+.: (wq_completion)nbd4-recv ->(work_completion)(&args->work) FD: 78 BD: 1 .+.+: &type->lock_class#2 ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&c->lock FD: 30 BD: 4 +.+.: subsys mutex#79 ->&k->k_lock ->&rq->__lock FD: 110 BD: 1 .+.+: sb_writers#26 ->mount_lock ->&sb->s_type->i_mutex_key#34 FD: 118 BD: 6 +.+.: &tree->tree_lock#2 ->&rq->__lock ->&tree->hash_lock#2 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount ->rcu_node_0 ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->&mapping->i_private_lock ->&____s->seqcount#2 ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&node->lock_wq#2 ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#49 ->&wb->list_lock ->&tree->tree_lock#2/2 ->&sb->map[i].swap_lock FD: 1 BD: 68 ....: &pool->xsk_tx_list_lock FD: 97 BD: 4 +.+.: &hiddev->existancelock ->fs_reclaim ->&c->lock ->&n->list_lock 
->&rq->__lock ->pool_lock#2 ->free_vmap_area_lock ->&vn->busy.lock ->&dev->power.lock ->&hdev->ll_open_lock ->&hiddev->list_lock FD: 106 BD: 2 +.+.: &sb->s_type->i_mutex_key#34 ->&sb->s_type->i_lock_key#20 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->pool_lock#2 ->&xa->xa_lock#5 ->&obj_hash[i].lock ->stock_lock ->&dentry->d_lock ->&mq_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->tomoyo_ss ->mount_lock FD: 1 BD: 30 ....: &list->lock#28 FD: 34 BD: 6 +.+.: (work_completion)(&tunnel->del_work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&tunnel->list_lock ->&pn->l2tp_tunnel_idr_lock FD: 1 BD: 68 ....: umem_ida.xa_lock FD: 66 BD: 5 +.+.: &ps->sk_lock ->&tunnel->list_lock ->fs_reclaim ->remove_cache_srcu ->pool_lock#2 ->&rq->__lock ->&dir->lock ->&pn->all_channels_lock FD: 1088 BD: 2 +.+.: (work_completion)(&pool->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&vn->busy.lock ->&obj_hash[i].lock ->&vn->lazy.lock ->pool_lock#2 ->umem_ida.xa_lock ->&lruvec->lru_lock FD: 1093 BD: 15 +.+.: &devlink->lock_key#21 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&xa->xa_lock#19 ->pcpu_alloc_mutex ->&c->lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->stack_depot_init_mutex ->&devlink_port->type_lock ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq FD: 38 BD: 29 ++++: &hu->proto_lock ->&list->lock#28 ->&rq->__lock ->&rsp->gp_wait ->&obj_hash[i].lock ->&x->wait#2 FD: 1 BD: 67 +...: &sch->root_lock_key#752 FD: 1 BD: 67 +...: &sch->root_lock_key#753 FD: 1 BD: 67 +...: &sch->root_lock_key#756 FD: 1 BD: 67 +...: &sch->root_lock_key#757 FD: 1 BD: 67 +...: &sch->root_lock_key#758 FD: 1 BD: 67 +...: &sch->root_lock_key#759 FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#21 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 450 BD: 2 +.+.: (work_completion)(&args->work) ->&u->iolock ->&u->lock ->&rq->__lock ->&nsock->tx_lock ->&config->recv_wq ->&obj_hash[i].lock ->pool_lock#2 FD: 6 BD: 2 +.+.: (work_completion)(&nlk->work) ->&obj_hash[i].lock ->pool_lock#2 ->rlock-AF_NETLINK ->&dir->lock FD: 29 BD: 4 ....: &x->wait#50 ->&p->pi_lock FD: 1 BD: 1 +.+.: &cprc->stat_lock FD: 4 BD: 4 +.+.: &fi->i_size_lock ->tk_core.seq.seqcount FD: 1 BD: 67 +...: &sch->root_lock_key#751 FD: 185 BD: 5 +.+.: mapping.invalidate_lock#5 ->&mapping->i_mmap_rwsem ->&sb->s_type->i_lock_key#41 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&sbi->cp_rwsem ->tk_core.seq.seqcount ->&sbi->inode_lock[i] FD: 171 BD: 4 +.+.: &sbi->pin_sem ->&sbi->cp_rwsem ->&sbi->inode_lock[i] ->&sbi->gc_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg0#35 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 23 ....: &sbi->error_lock FD: 141 BD: 1 +.+.: &type->s_umount_key#84 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&rq->__lock ->&sb->s_type->i_lock_key#50 ->&dentry->d_lock/1 ->rcu_node_0 ->&bdi->wb_waitq ->&(log)->gclock ->&(log)->loglock ->&sb->s_type->i_lock_key#3 ->&xa->xa_lock#9 ->lock#4 ->lock#5 ->&bdi->wb_switch_rwsem 
->&s->s_sync_lock ->&s->s_inode_list_lock ->&jfs_ip->ag_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mp->wait ->&c->lock ->tk_core.seq.seqcount ->&lruvec->lru_lock ->&wb->list_lock ->&folio_wait_table[i] ->jfs_log_mutex FD: 63 BD: 2 +.+.: (work_completion)(&sbi->s_error_work) ->&sbi->sb_lock FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg1#36 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#35 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#36 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 4 BD: 13 +...: &pn->l2tp_tunnel_idr_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1093 BD: 15 +.+.: &devlink->lock_key#20 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&n->list_lock ->&xa->xa_lock#19 ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->stack_depot_init_mutex ->&devlink_port->type_lock ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq FD: 78 BD: 153 .+.+: &ctx->map_changing_lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->stock_lock ->pool_lock#2 ->ptlock_ptr(ptdesc)#2 ->rcu_node_0 FD: 1 BD: 36 ....: &(&info->deferred_work)->timer FD: 1 BD: 5 ....: &hiddev->list_lock FD: 207 BD: 1 +.+.: (wq_completion)hci7#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) FD: 556 BD: 1 +.+.: (wq_completion)hci7 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 556 BD: 1 +.+.: (wq_completion)hci3 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 1 BD: 67 +...: &sch->root_lock_key#770 FD: 1 BD: 67 +...: &sch->root_lock_key#769 FD: 207 BD: 1 +.+.: (wq_completion)hci3#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) 
->(work_completion)(&(&hdev->cmd_timer)->work) ->&rq->__lock FD: 1 BD: 3 +.+.: &hip->extents_lock FD: 139 BD: 1 +.+.: &type->s_umount_key#85 ->&x->wait#23 ->shrinker_mutex ->&obj_hash[i].lock ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#49 ->&dentry->d_lock/1 ->&bdi->wb_waitq ->&rq->__lock ->&bdi->wb_switch_rwsem ->&s->s_sync_lock ->&sbi->vh_mutex ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->rcu_node_0 ->&rcu_state.expedited_wq ->inode_hash_lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&base->lock ->&tree->hash_lock#2 ->&mapping->i_private_lock ->stock_lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount FD: 9 BD: 13 +...: &pn->l2tp_session_idr_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 1 BD: 4 +.+.: &info->lock#2 FD: 1 BD: 67 +...: &sch->root_lock_key#742 FD: 1 BD: 67 +...: &sch->root_lock_key#743 FD: 1 BD: 67 +...: &sch->root_lock_key#744 FD: 1 BD: 67 +...: &sch->root_lock_key#745 FD: 1 BD: 67 +...: &sch->root_lock_key#746 FD: 1 BD: 67 +...: &sch->root_lock_key#747 FD: 1 BD: 12 ....: &new->fa_lock FD: 187 BD: 70 +.+.: team->team_lock_key#20 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->fs_reclaim ->&c->lock ->netpoll_srcu ->net_rwsem ->&tn->lock ->&dev_addr_list_lock_key#8 ->&dir->lock#2 ->&n->list_lock ->input_pool.lock ->netdev_rename_lock.seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&____s->seqcount ->lweventlist_lock ->(console_sem).lock ->remove_cache_srcu FD: 39 BD: 1 +.+.: (wq_completion)bond0#20 ->(work_completion)(&(&slave->notify_work)->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 76 +...: &r->producer_lock#5 FD: 80 BD: 2 +.+.: jfs_log_mutex ->&(log)->loglock ->&sb->s_type->i_lock_key#3 ->&xa->xa_lock#9 ->&rq->__lock ->lock#4 ->lock#5 ->&(log)->gclock ->jfsLCacheLock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&obj_hash[i].lock FD: 1 BD: 67 +...: &sch->root_lock_key#724 FD: 1 BD: 1 ....: sync_timeline_list_lock FD: 1 BD: 5 +.+.: resource_mutex FD: 1 BD: 4 +...: clock-AF_PHONET FD: 1 BD: 1 ....: &obj->lock FD: 1 BD: 749 ....: &wq#4 FD: 78 BD: 1 +.+.: (wq_completion)wg-kex-wg0#41 ->(work_completion)(&peer->transmit_handshake_work) FD: 1093 BD: 15 +.+.: &devlink->lock_key#22 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&xa->xa_lock#19 ->pcpu_alloc_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&n->list_lock ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 
->&(&fn_net->fib_chain)->lock ->stack_depot_init_mutex ->&____s->seqcount#2 ->&____s->seqcount ->&devlink_port->type_lock ->&nsim_trap_data->trap_lock FD: 1088 BD: 2 +.+.: cfg80211_disconnect_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 87 BD: 71 +.+.: k-sk_lock-AF_TIPC/1 ->&rq->__lock ->k-slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->fs_reclaim ->pool_lock#2 ->&list->lock#17 FD: 108 BD: 2 +.+.: (work_completion)(&con->rwork) ->&rq->__lock ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->k-clock-AF_TIPC ->&srv->idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu ->&con->outqueue_lock FD: 111 BD: 2 +.+.: (work_completion)(&srv->awork) ->&rq->__lock ->&srv->idr_lock ->fs_reclaim ->&____s->seqcount ->rcu_node_0 ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&sb->s_type->i_lock_key#9 ->&xa->xa_lock#9 ->&fsnotify_mark_srcu FD: 113 BD: 1 +.+.: (wq_completion)tipc_rcv ->&rq->__lock ->(work_completion)(&srv->awork) ->(work_completion)(&con->rwork) FD: 1 BD: 67 +...: &sch->root_lock_key#803 FD: 1 BD: 67 +...: &sch->root_lock_key#802 FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((worker))) *)((worker))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 67 +...: &sch->root_lock_key#788 FD: 1 BD: 67 +...: &sch->root_lock_key#787 FD: 7 BD: 714 +.+.: ptlock_ptr(ptdesc)#3 ->lock#4 ->&____s->seqcount ->key FD: 5 BD: 8 +.+.: &tree->hash_lock#2 ->lock#4 FD: 1 BD: 8 ....: &node->lock_wq#2 FD: 134 BD: 3 +.+.: &sbi->vh_mutex ->&rq->__lock ->&sbi->alloc_mutex ->fs_reclaim ->pool_lock#2 ->stock_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->inode_hash_lock ->&sb->s_type->i_lock_key#49 ->&wb->list_lock ->&wb->work_lock ->&sbi->work_lock#2 ->&tree->tree_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 46 BD: 4 +.+.: &sbi->alloc_mutex ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->rcu_node_0 ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&x->wait#25 ->(&timer.timer) FD: 14 BD: 4 +.+.: &sbi->work_lock#2 ->&obj_hash[i].lock ->&base->lock FD: 116 BD: 7 +.+.: &tree->tree_lock#2/2 ->&rq->__lock ->&tree->hash_lock#2 ->fs_reclaim ->pool_lock#2 ->rcu_node_0 ->&____s->seqcount ->stock_lock ->&xa->xa_lock#9 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->i_private_lock ->tk_core.seq.seqcount ->&folio_wait_table[i] ->&____s->seqcount#2 ->&node->lock_wq#2 ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#49 ->&wb->list_lock FD: 1 BD: 3 +.+.: &HFSPLUS_I(inode)->extents_lock all lock chains: irq_context: 0 (console_sem).lock irq_context: 0 &obj_hash[i].lock irq_context: 0 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex irq_context: 0 fixmap_lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex irq_context: 0 console_mutex irq_context: 0 input_pool.lock irq_context: 0 base_crng.lock irq_context: 0 crng_init_wait.lock irq_context: 0 early_pfn_lock irq_context: 0 devtree_lock irq_context: 0 resource_lock irq_context: 0 restart_handler_list.lock irq_context: 0 system_transition_mutex irq_context: 0 
pcpu_lock irq_context: 0 debug_hook_lock irq_context: 0 zonelist_update_seq irq_context: 0 zonelist_update_seq zonelist_update_seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &zone->lock irq_context: 0 &zone->lock &____s->seqcount irq_context: 0 &pcp->lock &zone->lock irq_context: 0 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount irq_context: 0 pool_lock#2 irq_context: 0 pcpu_alloc_mutex irq_context: 0 pcpu_alloc_mutex pcpu_lock irq_context: 0 &n->list_lock irq_context: 0 &c->lock irq_context: 0 slab_mutex irq_context: 0 slab_mutex pool_lock#2 irq_context: 0 slab_mutex pcpu_alloc_mutex irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 shrinker_mutex irq_context: 0 cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 trace_types_lock irq_context: 0 panic_notifier_list.lock irq_context: 0 die_chain.lock irq_context: 0 trace_event_sem irq_context: 0 batched_entropy_u32.lock irq_context: 0 batched_entropy_u32.lock crngs.lock irq_context: 0 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 sysctl_lock irq_context: 0 &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 init_task.pi_lock irq_context: 0 init_task.pi_lock &rq->__lock irq_context: 0 init_task.vtime_seqcount irq_context: 0 slab_mutex &c->lock irq_context: 0 slab_mutex &pcp->lock &zone->lock irq_context: 0 slab_mutex &____s->seqcount irq_context: 0 wq_pool_mutex irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 wq_pool_mutex &____s->seqcount irq_context: 0 wq_pool_mutex &c->lock irq_context: 0 wq_pool_mutex pool_lock#2 irq_context: 0 wq_pool_mutex pcpu_alloc_mutex irq_context: 0 wq_pool_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 wq_pool_mutex &wq->mutex irq_context: 0 wq_pool_mutex &obj_hash[i].lock irq_context: 0 rcu_node_0 irq_context: 0 rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_lock rcu_node_0 irq_context: 0 &rnp->exp_poll_lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock &c->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock pool_lock#2 irq_context: 0 trigger_cmd_mutex irq_context: 0 free_vmap_area_lock irq_context: 0 &vn->busy.lock irq_context: 0 acpi_probe_mutex irq_context: 0 acpi_probe_mutex pool_lock#2 irq_context: 0 acpi_probe_mutex free_vmap_area_lock irq_context: 0 acpi_probe_mutex &vn->busy.lock irq_context: 0 acpi_probe_mutex &pcp->lock &zone->lock irq_context: 0 acpi_probe_mutex &____s->seqcount irq_context: 0 acpi_probe_mutex init_mm.page_table_lock irq_context: 0 acpi_probe_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_probe_mutex resource_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 acpi_probe_mutex cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 acpi_probe_mutex (console_sem).lock irq_context: 0 acpi_probe_mutex irq_domain_mutex irq_context: 0 acpi_probe_mutex pcpu_alloc_mutex irq_context: 0 acpi_probe_mutex 
pcpu_alloc_mutex pcpu_lock irq_context: 0 acpi_probe_mutex irq_domain_mutex pool_lock#2 irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 acpi_probe_mutex &domain->mutex irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 acpi_probe_mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_probe_mutex &domain->mutex pool_lock#2 irq_context: 0 acpi_probe_mutex &domain->mutex &irq_desc_lock_class irq_context: 0 acpi_probe_mutex &desc->request_mutex irq_context: 0 acpi_probe_mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 acpi_probe_mutex &irq_desc_lock_class irq_context: 0 acpi_probe_mutex cpu_pm_notifier.lock irq_context: 0 acpi_probe_mutex &obj_hash[i].lock irq_context: 0 acpi_probe_mutex &vn->lazy.lock irq_context: 0 acpi_probe_mutex iort_msi_chip_lock irq_context: 0 acpi_probe_mutex &zone->lock irq_context: 0 acpi_probe_mutex &zone->lock &____s->seqcount irq_context: 0 acpi_probe_mutex its_lock irq_context: 0 acpi_probe_mutex resource_lock irq_context: 0 acpi_probe_mutex efi_mem_reserve_persistent_lock irq_context: 0 acpi_probe_mutex lpi_range_lock irq_context: 0 acpi_probe_mutex syscore_ops_lock irq_context: 0 acpi_probe_mutex its_lock &its->lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex resource_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex resource_lock irq_context: 0 timekeeper_lock irq_context: 0 timekeeper_lock tk_core.seq.seqcount irq_context: 0 timekeeper_lock tk_core.seq.seqcount &obj_hash[i].lock irq_context: 0 acpi_probe_mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 acpi_probe_mutex &domain->mutex &____s->seqcount irq_context: 0 acpi_probe_mutex &desc->request_mutex &irq_desc_lock_class irq_controller_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex (console_sem).lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex clockevents_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex clockevents_lock tk_core.seq.seqcount irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex clockevents_lock tick_broadcast_lock irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex clockevents_lock jiffies_seq.seqcount irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex &irq_desc_lock_class irq_context: 0 acpi_probe_mutex cpu_hotplug_lock cpuhp_state_mutex &irq_desc_lock_class irq_controller_lock irq_context: 0 acpi_probe_mutex clocksource_mutex irq_context: 0 clockevents_lock irq_context: 0 tk_core.seq.seqcount irq_context: 0 &base->lock 
irq_context: 0 &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u64.lock irq_context: 0 batched_entropy_u64.lock crngs.lock irq_context: 0 pmus_lock irq_context: 0 pmus_lock pcpu_alloc_mutex irq_context: 0 pmus_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 pmus_lock pool_lock#2 irq_context: 0 pmus_lock &obj_hash[i].lock irq_context: 0 &swhash->hlist_mutex irq_context: 0 pmus_lock &cpuctx_mutex irq_context: 0 pmus_lock &obj_hash[i].lock pool_lock irq_context: 0 tty_ldiscs_lock irq_context: 0 console_lock irq_context: 0 console_lock pool_lock#2 irq_context: 0 console_lock &obj_hash[i].lock irq_context: 0 console_lock &pcp->lock &zone->lock irq_context: 0 console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 console_lock &____s->seqcount irq_context: 0 console_lock &c->lock irq_context: 0 console_lock kbd_event_lock irq_context: 0 console_lock kbd_event_lock led_lock irq_context: 0 console_lock (console_sem).lock irq_context: 0 console_lock console_owner_lock irq_context: 0 once_lock irq_context: 0 once_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 console_mutex (console_sem).lock irq_context: 0 console_mutex console_lock irq_context: 0 init_task.alloc_lock irq_context: 0 acpi_ioremap_lock irq_context: 0 acpi_ioremap_lock pool_lock#2 irq_context: 0 semaphore->lock irq_context: 0 *(&acpi_gbl_reference_count_lock) irq_context: 0 hrtimer_bases.lock irq_context: 0 hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 percpu_counters_lock irq_context: 0 tomoyo_policy_lock irq_context: 0 tomoyo_policy_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex irq_context: 0 rtnl_mutex pool_lock#2 irq_context: 0 lock irq_context: 0 lock kernfs_idr_lock irq_context: 0 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem irq_context: 0 file_systems_lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 dq_list_lock irq_context: 0 sb_lock irq_context: 0 &type->s_umount_key/1 irq_context: 0 &type->s_umount_key/1 &c->lock irq_context: 0 &type->s_umount_key/1 &____s->seqcount irq_context: 0 &type->s_umount_key/1 pool_lock#2 irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key/1 shrinker_mutex irq_context: 0 &type->s_umount_key/1 shrinker_mutex pool_lock#2 irq_context: 0 &type->s_umount_key/1 list_lrus_mutex irq_context: 0 &type->s_umount_key/1 sb_lock irq_context: 0 &type->s_umount_key/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 percpu_counters_lock irq_context: 0 &type->s_umount_key/1 crngs.lock irq_context: 0 &type->s_umount_key/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key irq_context: 0 &type->s_umount_key/1 
&sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &type->s_umount_key/1 &dentry->d_lock irq_context: 0 mnt_id_ida.xa_lock irq_context: 0 &dentry->d_lock irq_context: 0 mount_lock irq_context: 0 mount_lock mount_lock.seqcount irq_context: 0 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 irq_context: 0 &type->s_umount_key#2/1 pool_lock#2 irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex irq_context: hardirq jiffies_lock irq_context: hardirq jiffies_lock jiffies_seq.seqcount irq_context: hardirq log_wait.lock irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#2/1 shrinker_mutex irq_context: 0 &type->s_umount_key#2/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#2/1 sb_lock irq_context: 0 &type->s_umount_key#2/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &c->lock irq_context: 0 &type->s_umount_key#2/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#2/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 &dentry->d_lock irq_context: 0 ucounts_lock irq_context: 0 proc_inum_ida.xa_lock irq_context: 0 init_fs.lock irq_context: 0 init_fs.lock init_fs.seq.seqcount irq_context: 0 mnt_ns_tree_lock irq_context: 0 &type->s_umount_key#3/1 irq_context: 0 &type->s_umount_key#3/1 pool_lock#2 irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#3/1 shrinker_mutex irq_context: 0 &type->s_umount_key#3/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#3/1 sb_lock irq_context: 0 &type->s_umount_key#3/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &c->lock irq_context: 0 &type->s_umount_key#3/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#3/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 &dentry->d_lock irq_context: 0 &type->s_umount_key#3/1 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 proc_subdir_lock irq_context: 0 proc_subdir_lock irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 &type->s_umount_key#4/1 irq_context: 0 &type->s_umount_key#4/1 pool_lock#2 irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#4/1 shrinker_mutex irq_context: 0 &type->s_umount_key#4/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#4/1 sb_lock irq_context: 0 &type->s_umount_key#4/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#4/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#4/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 
irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 &type->s_umount_key#4/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 irq_context: 0 &type->s_umount_key#5/1 pool_lock#2 irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#5/1 shrinker_mutex irq_context: 0 &type->s_umount_key#5/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#5/1 sb_lock irq_context: 0 &type->s_umount_key#5/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#5/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#5/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 &dentry->d_lock irq_context: 0 cgroup_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex pool_lock#2 irq_context: 0 cgroup_mutex lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cgroup_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cgroup_file_kn_lock irq_context: 0 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex css_set_lock irq_context: 0 lock cgroup_idr_lock irq_context: 0 lock cgroup_idr_lock pool_lock#2 irq_context: 0 cpuset_mutex irq_context: 0 cpuset_mutex callback_lock irq_context: 0 cgroup_mutex &c->lock irq_context: 0 cgroup_mutex &____s->seqcount irq_context: 0 cgroup_mutex blkcg_pol_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex lock memcg_idr_lock irq_context: 0 cgroup_mutex lock memcg_idr_lock &c->lock irq_context: 0 cgroup_mutex lock memcg_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex lock memcg_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex percpu_counters_lock irq_context: 0 cgroup_mutex shrinker_mutex irq_context: 0 cgroup_mutex shrinker_mutex pool_lock#2 irq_context: 0 cgroup_mutex &base->lock irq_context: 0 cgroup_mutex &base->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex memcg_idr_lock irq_context: 0 cgroup_mutex devcgroup_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 init_sighand.siglock irq_context: 0 init_mm.page_table_lock irq_context: 0 init_files.file_lock irq_context: 0 rcu_read_lock init_sighand.siglock irq_context: 0 lock pidmap_lock irq_context: 0 lock pidmap_lock pool_lock#2 irq_context: 0 pidmap_lock irq_context: 0 cgroup_threadgroup_rwsem irq_context: 0 cgroup_threadgroup_rwsem css_set_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock 
&rq->__lock irq_context: 0 cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_sighand.siglock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &p->pi_lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (kthreadd_done).wait.lock irq_context: 0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock irq_context: 0 &p->alloc_lock irq_context: 0 &p->alloc_lock &____s->seqcount#2 irq_context: 0 fs_reclaim irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kthread_create_lock irq_context: 0 &x->wait irq_context: 0 rcu_read_lock &sighand->siglock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 &x->wait &p->pi_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (worker)->lock irq_context: 0 wq_pool_mutex fs_reclaim irq_context: 0 wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex kthread_create_lock irq_context: 0 wq_pool_mutex &p->pi_lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &x->wait irq_context: 0 wq_pool_mutex &rq->__lock irq_context: 0 wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex irq_context: 0 wq_mayday_lock irq_context: 0 wq_pool_mutex hrtimer_bases.lock irq_context: 0 wq_pool_mutex hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 wq_pool_mutex hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (null) irq_context: 0 (null) tk_core.seq.seqcount irq_context: 0 &xa->xa_lock irq_context: 0 &pool->lock irq_context: 0 &pool->lock &p->pi_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&pool->mayday_timer) irq_context: 0 &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) &rnp->exp_poll_lock irq_context: 0 (wq_completion)events irq_context: 0 (wq_completion)events (work_completion)(&w->work) irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) pool_lock#2 irq_context: 0 (&wq_watchdog_timer) irq_context: 0 (wq_completion)events_unbound irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&(&kfence_timer)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) allocation_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock irq_context: hardirq allocation_wait.lock &p->pi_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock irq_context: 0 batched_entropy_u8.lock crngs.lock irq_context: 0 kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex irq_context: 0 rcu_tasks.cbs_gbl_lock irq_context: hardirq rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#2 irq_context: 0 rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_tasks.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex kernel/rcu/tasks.h:155 irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex irq_context: 0 rcu_tasks_trace.cbs_gbl_lock irq_context: softirq (&timer.timer) irq_context: softirq (&timer.timer) &p->pi_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&kfence_timer)->timer irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
rcu_tasks.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_tasks__percpu.cbs_pcpu_lock &base->lock irq_context: 0 rcu_read_lock rcu_tasks__percpu.cbs_pcpu_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &base->lock irq_context: 0 rcu_read_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &base->lock &obj_hash[i].lock irq_context: 0 (memory_chain).rwsem irq_context: 0 cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &rcu_state.gp_wq 
irq_context: 0 &rnp->kthread_mutex irq_context: 0 &rnp->kthread_mutex fs_reclaim irq_context: 0 &rnp->kthread_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rnp->kthread_mutex &c->lock irq_context: 0 &rnp->kthread_mutex &____s->seqcount irq_context: 0 &rnp->kthread_mutex pool_lock#2 irq_context: 0 &rnp->kthread_mutex kthread_create_lock irq_context: 0 &rnp->kthread_mutex &p->pi_lock irq_context: 0 &rnp->kthread_mutex &p->pi_lock &rq->__lock irq_context: 0 &rnp->kthread_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rnp->kthread_mutex &x->wait irq_context: 0 &rnp->kthread_mutex &rq->__lock irq_context: 0 &rnp->kthread_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rnp->kthread_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex &group->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex tmigr_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &tmc->lock irq_context: 0 &stop_pi_lock irq_context: 0 &stop_pi_lock &rq->__lock irq_context: 0 &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stopper->lock irq_context: 0 (module_notify_list).rwsem irq_context: 0 ddebug_lock irq_context: 0 iort_msi_chip_lock irq_context: 0 irq_domain_mutex irq_context: 0 irq_domain_mutex fs_reclaim irq_context: 0 irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 irq_domain_mutex pool_lock#2 irq_context: 0 cci_probing irq_context: 0 cci_probing devtree_lock irq_context: 0 resource_lock irq_context: 0 fixmap_lock fs_reclaim irq_context: 0 fixmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 fixmap_lock &____s->seqcount irq_context: 0 fixmap_lock &c->lock irq_context: 0 fixmap_lock pool_lock#2 irq_context: 0 rcu_read_lock ptlock_ptr(ptdesc) irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex clockevents_lock irq_context: 0 watchdog_mutex irq_context: 0 watchdog_mutex cpu_hotplug_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#3 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 irq_context: 0 
watchdog_mutex cpu_hotplug_lock &x->wait#4 irq_context: 0 &newf->file_lock irq_context: 0 init_fs.lock &dentry->d_lock irq_context: 0 &p->vtime.seqcount irq_context: 0 cpu_hotplug_lock mem_hotplug_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.waiters.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &obj_hash[i].lock irq_context: softirq rcu_callback pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock cpu_hotplug_lock.waiters.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &swhash->hlist_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock &cpuctx_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcp_batch_high_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &xa->xa_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock wq_pool_attach_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock relay_channels_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rnp->kthread_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex &group->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock tmigr_mutex &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock (cpu_running).wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &base->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &base->lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock irq_context: 0 cpu_add_remove_lock 
cpu_hotplug_lock sparse_irq_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock irq_context: hardirq jiffies_seq.seqcount irq_context: 0 its_lock irq_context: 0 its_lock &its->lock irq_context: 0 clockevents_lock tick_broadcast_lock irq_context: 0 clockevents_lock jiffies_seq.seqcount irq_context: 0 clockevents_lock tk_core.seq.seqcount irq_context: 0 &irq_desc_lock_class irq_context: 0 &irq_desc_lock_class irq_controller_lock irq_context: 0 (cpu_running).wait.lock irq_context: 0 (cpu_running).wait.lock &p->pi_lock irq_context: 0 &x->wait#5 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock (&timer.timer) irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &x->wait#5 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait#5 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock &irq_desc_lock_class irq_context: 0 cpu_hotplug_lock cpuhp_state-up &swhash->hlist_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock &cpuctx_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &tmc->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#3 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &x->wait#6 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex 
irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rnp->kthread_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rnp->kthread_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rnp->kthread_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up resource_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up resource_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 &x->wait#5 &p->pi_lock irq_context: hardirq &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 &x->wait#7 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &x->wait#7 irq_context: 0 &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events rdist_memreserve_cpuhp_cleanup_work irq_context: 0 (wq_completion)events rdist_memreserve_cpuhp_cleanup_work cpu_hotplug_lock irq_context: 0 (wq_completion)events rdist_memreserve_cpuhp_cleanup_work cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 sched_domains_mutex irq_context: 0 
sched_domains_mutex fs_reclaim irq_context: 0 sched_domains_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sched_domains_mutex &____s->seqcount irq_context: 0 sched_domains_mutex pool_lock#2 irq_context: 0 sched_domains_mutex &obj_hash[i].lock irq_context: 0 sched_domains_mutex pcpu_alloc_mutex irq_context: 0 sched_domains_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sched_domains_mutex &c->lock irq_context: 0 sched_domains_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sched_domains_mutex rcu_read_lock pool_lock#2 irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &cp->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 sched_domains_mutex pcpu_lock irq_context: 0 &wq->mutex irq_context: 0 &wq->mutex &pool->lock irq_context: 0 slab_mutex fs_reclaim irq_context: 0 slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6/1 irq_context: 0 &type->s_umount_key#6/1 fs_reclaim irq_context: 0 &type->s_umount_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6/1 pool_lock#2 irq_context: 0 &type->s_umount_key#6/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#6/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#6/1 shrinker_mutex irq_context: 0 &type->s_umount_key#6/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#6/1 sb_lock irq_context: 0 &type->s_umount_key#6/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#6/1 &c->lock irq_context: 0 &type->s_umount_key#6/1 &____s->seqcount irq_context: 0 &type->s_umount_key#6/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#6/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#6/1 crngs.lock irq_context: 0 &type->s_umount_key#6/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#6/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#6/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#6/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key#6/1 &sb->s_type->i_lock_key#6 irq_context: 0 &type->s_umount_key#6/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#6/1 &dentry->d_lock irq_context: softirq rcu_read_lock &rq->__lock irq_context: 0 (setup_done).wait.lock irq_context: 0 namespace_sem irq_context: 0 namespace_sem fs_reclaim irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem &pcp->lock &zone->lock irq_context: 0 namespace_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 namespace_sem &____s->seqcount irq_context: 0 namespace_sem &c->lock irq_context: 0 namespace_sem pool_lock#2 irq_context: 0 namespace_sem mnt_id_ida.xa_lock irq_context: 0 namespace_sem pcpu_alloc_mutex irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_lock irq_context: 0 namespace_sem &dentry->d_lock irq_context: 0 namespace_sem mount_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 namespace_sem mnt_ns_tree_lock irq_context: 0 &p->alloc_lock init_fs.lock irq_context: 0 rcu_read_lock &____s->seqcount#3 irq_context: 0 file_systems_lock irq_context: 0 &type->s_umount_key#7 irq_context: 0 
&type->s_umount_key#7 fs_reclaim irq_context: 0 &type->s_umount_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7 pool_lock#2 irq_context: 0 &type->s_umount_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#7 &____s->seqcount irq_context: 0 &type->s_umount_key#7 &c->lock irq_context: 0 &type->s_umount_key#7 &lru->node[i].lock irq_context: 0 &type->s_umount_key#7 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#7 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#7 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key namespace_sem irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key namespace_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_lock_key#6 irq_context: 0 &fs->lock irq_context: 0 &fs->lock &____s->seqcount#3 irq_context: 0 (setup_done).wait.lock &p->pi_lock irq_context: 0 req_lock irq_context: 0 (of_reconfig_chain).rwsem irq_context: 0 of_mutex irq_context: 0 of_mutex fs_reclaim irq_context: 0 of_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 of_mutex pool_lock#2 irq_context: 0 of_mutex lock irq_context: 0 of_mutex lock kernfs_idr_lock irq_context: 0 of_mutex &root->kernfs_rwsem irq_context: 0 of_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &x->wait#8 irq_context: 0 &k->list_lock irq_context: 0 bus_type_sem irq_context: 0 &root->kernfs_rwsem irq_context: 0 &dev->power.lock irq_context: 0 dpm_list_mtx irq_context: 0 uevent_sock_mutex irq_context: 0 running_helpers_waitq.lock irq_context: 0 sysfs_symlink_target_lock irq_context: 0 &k->k_lock irq_context: 0 lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex &dev->power.lock irq_context: 0 subsys mutex irq_context: 0 memory_blocks.xa_lock irq_context: 0 memory_blocks.xa_lock pool_lock#2 irq_context: softirq (&timer.timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex 
cpu_hotplug_lock &stop_pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (console_sem).lock irq_context: 0 lock kernfs_idr_lock &c->lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 subsys mutex#2 irq_context: 0 register_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock fs_reclaim irq_context: 0 register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock &c->lock irq_context: 0 register_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 wq_pool_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 wq_pool_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_pm_notifier.lock irq_context: 0 (cpufreq_policy_notifier_list).rwsem irq_context: 0 (pm_chain_head).rwsem irq_context: 0 cpufreq_governor_mutex irq_context: 0 rcu_state.exp_mutex (worker)->lock irq_context: 0 rcu_state.exp_mutex (worker)->lock &p->pi_lock irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock irq_context: 0 clocksource_mutex irq_context: 0 syscore_ops_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex (worker)->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex (worker)->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rcu_state.expedited_wq irq_context: 0 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 binfmt_lock irq_context: 0 pin_fs_lock irq_context: 0 &type->s_umount_key#8/1 irq_context: 0 &type->s_umount_key#8/1 fs_reclaim irq_context: 0 &type->s_umount_key#8/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#8/1 shrinker_mutex irq_context: 0 &type->s_umount_key#8/1 &____s->seqcount irq_context: 0 &type->s_umount_key#8/1 
&c->lock irq_context: 0 &type->s_umount_key#8/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#8/1 sb_lock irq_context: 0 &type->s_umount_key#8/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#8/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#8/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#8/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#8/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#8/1 &dentry->d_lock irq_context: 0 rcu_read_lock mount_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#7 irq_context: 0 &sb->s_type->i_mutex_key#2 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 irq_context: 0 &type->s_umount_key#9/1 fs_reclaim irq_context: 0 &type->s_umount_key#9/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 pool_lock#2 irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#9/1 shrinker_mutex irq_context: 0 &type->s_umount_key#9/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#9/1 sb_lock irq_context: 0 &type->s_umount_key#9/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#9/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#9/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 chrdevs_lock irq_context: 0 
gpio_devices_srcu irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 subsys mutex#3 irq_context: 0 async_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 regulator_list_mutex irq_context: 0 (wq_completion)async irq_context: 0 (wq_completion)async (work_completion)(&entry->work) irq_context: 0 (wq_completion)async (work_completion)(&entry->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex device_links_srcu irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex device_links_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex fs_reclaim irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &dev->devres_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &c->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pinctrl_list_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex devtree_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pinctrl_maps_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pinctrl_list_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pinctrl_list_mutex pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &k->list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &x->wait#8 irq_context: 0 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex regulator_nesting_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex regulator_ww_class_mutex regulator_nesting_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim irq_context: 0 
(wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex bus_type_sem irq_context: 0 (reboot_notifier_list).rwsem irq_context: 0 &vn->lazy.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex dpm_list_mtx irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &k->k_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex subsys mutex#4 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex subsys mutex#4 &k->k_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex pin_fs_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &x->wait#5 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex regulator_list_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex deferred_probe_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_lock &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_lock pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_done.lock irq_context: 0 &x->wait#5 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#10/1 irq_context: 0 &type->s_umount_key#10/1 fs_reclaim irq_context: 0 &type->s_umount_key#10/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 pool_lock#2 irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#10/1 shrinker_mutex irq_context: 0 &type->s_umount_key#10/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#10/1 sb_lock irq_context: 0 &type->s_umount_key#10/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &c->lock irq_context: 0 &type->s_umount_key#10/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#10/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#10/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pack_mutex irq_context: 0 pack_mutex fs_reclaim irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pack_mutex &____s->seqcount irq_context: 0 pack_mutex &c->lock irq_context: 0 pack_mutex pool_lock#2 irq_context: 0 pack_mutex free_vmap_area_lock irq_context: 0 pack_mutex &vn->busy.lock irq_context: 0 pack_mutex init_mm.page_table_lock irq_context: 0 pack_mutex &pcp->lock &zone->lock irq_context: 0 pack_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pack_mutex vmap_purge_lock irq_context: 0 pack_mutex vmap_purge_lock &vn->lazy.lock irq_context: 0 pack_mutex vmap_purge_lock init_mm.page_table_lock irq_context: 0 pack_mutex vmap_purge_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 pack_mutex vmap_purge_lock &vn->pool_lock irq_context: 0 pack_mutex vmap_purge_lock free_vmap_area_lock irq_context: 0 pack_mutex vmap_purge_lock free_vmap_area_lock 
&obj_hash[i].lock irq_context: 0 pack_mutex vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 patch_lock irq_context: 0 &fp->aux->used_maps_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 proto_list_mutex irq_context: 0 targets_mutex irq_context: 0 nl_table_lock irq_context: 0 nl_table_wait.lock irq_context: 0 net_family_lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock irq_context: 0 sparse_irq_lock fs_reclaim irq_context: 0 sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock irq_context: 0 sparse_irq_lock &root->kernfs_rwsem irq_context: 0 sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 sparse_irq_lock &c->lock irq_context: 0 sparse_irq_lock &____s->seqcount irq_context: 0 &cma->lock irq_context: 0 cma_mutex irq_context: 0 cma_mutex &zone->lock irq_context: 0 cma_mutex &zone->lock &____s->seqcount irq_context: 0 cma_mutex pcpu_drain_mutex &pcp->lock irq_context: 0 cma_mutex pcpu_drain_mutex &pcp->lock &zone->lock irq_context: 0 cma_mutex pcpu_drain_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cma_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cma_mutex rcu_state.exp_mutex (worker)->lock irq_context: 0 cma_mutex rcu_state.exp_mutex (worker)->lock &p->pi_lock irq_context: 0 cma_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cma_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cma_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock irq_context: 0 cma_mutex lock#2 irq_context: 0 cma_mutex &obj_hash[i].lock irq_context: 0 &vn->pool_lock irq_context: 0 &pool->lock#2 irq_context: 0 cma_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cma_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 freezer_lock irq_context: 0 audit_backlog_wait.lock irq_context: 0 kauditd_wait.lock irq_context: 0 &list->lock irq_context: 0 kauditd_wait.lock &p->pi_lock irq_context: 0 lock#3 irq_context: 0 lock#3 &zone->lock irq_context: 0 pcp_batch_high_lock irq_context: 0 khugepaged_mutex irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 gdp_mutex irq_context: 0 gdp_mutex &k->list_lock irq_context: 0 gdp_mutex fs_reclaim irq_context: 0 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 gdp_mutex pool_lock#2 irq_context: 0 gdp_mutex lock irq_context: 0 gdp_mutex lock kernfs_idr_lock irq_context: 0 gdp_mutex &root->kernfs_rwsem 
irq_context: 0 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 subsys mutex#5 irq_context: 0 subsys mutex#5 &k->k_lock irq_context: 0 subsys mutex#6 irq_context: 0 subsys mutex#6 &k->list_lock irq_context: 0 subsys mutex#6 &k->k_lock irq_context: 0 regmap_debugfs_early_lock irq_context: 0 (acpi_reconfig_chain).rwsem irq_context: 0 __i2c_board_lock irq_context: 0 core_lock irq_context: 0 core_lock &k->list_lock irq_context: 0 core_lock &k->k_lock irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 quarantine_lock irq_context: 0 nl_table_lock irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu quarantine_lock irq_context: 0 thermal_governor_lock irq_context: 0 thermal_governor_lock thermal_list_lock irq_context: 0 cpuidle_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpuidle_lock rcu_state.exp_mutex (worker)->lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex (worker)->lock &p->pi_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex (worker)->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock (console_sem).lock irq_context: 0 mmu_notifier_invalidate_range_start irq_context: 0 &dir->lock irq_context: 0 k-sk_lock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 k-slock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 resource_lock pool_lock#2 irq_context: 0 resource_lock &obj_hash[i].lock irq_context: 0 resource_lock &c->lock irq_context: 0 resource_lock &____s->seqcount irq_context: 0 crngs.lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 tty_mutex irq_context: 0 iova_cache_mutex irq_context: 0 iova_cache_mutex slab_mutex irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 iova_cache_mutex slab_mutex pool_lock#2 irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 subsys mutex#7 irq_context: 0 subsys mutex#7 &k->k_lock irq_context: 0 device_links_lock irq_context: 0 uidhash_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock irq_context: 0 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 oom_reaper_wait.lock irq_context: 0 subsys mutex#8 irq_context: 0 &pgdat->kcompactd_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pool_lock#2 irq_context: 0 memory_tier_lock irq_context: 0 memory_tier_lock 
fs_reclaim irq_context: 0 memory_tier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 memory_tier_lock pool_lock#2 irq_context: 0 memory_tier_lock (memory_chain).rwsem irq_context: 0 khugepaged_mutex fs_reclaim irq_context: 0 khugepaged_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 khugepaged_mutex pool_lock#2 irq_context: 0 khugepaged_mutex kthread_create_lock irq_context: 0 khugepaged_mutex &p->pi_lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 khugepaged_mutex &x->wait irq_context: 0 khugepaged_mutex &rq->__lock irq_context: 0 khugepaged_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ksm_thread_mutex irq_context: 0 ksm_thread_wait.lock irq_context: 0 khugepaged_mutex &obj_hash[i].lock irq_context: 0 khugepaged_mutex lock#3 irq_context: 0 khugepaged_mutex lock#3 &zone->lock irq_context: 0 khugepaged_mutex pcp_batch_high_lock irq_context: 0 cgroup_mutex fs_reclaim irq_context: 0 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_ops_lock irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem (crypto_chain).rwsem irq_context: 0 cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pcpu_alloc_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex kthread_create_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 lock#2 irq_context: 0 khugepaged_mm_lock irq_context: 0 khugepaged_wait.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &x->wait irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock (worker)->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock (worker)->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock (worker)->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock (worker)->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 krc.lock irq_context: 0 krc.lock &obj_hash[i].lock irq_context: 0 krc.lock hrtimer_bases.lock irq_context: 0 krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 krc.lock &base->lock irq_context: 0 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 wq_pool_mutex 
wq_pool_mutex.wait_lock irq_context: 0 wq_pool_mutex.wait_lock irq_context: 0 bio_slab_lock irq_context: 0 bio_slab_lock fs_reclaim irq_context: 0 bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex irq_context: 0 bio_slab_lock slab_mutex fs_reclaim irq_context: 0 bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &c->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &____s->seqcount irq_context: 0 bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 major_names_lock irq_context: 0 major_names_lock fs_reclaim irq_context: 0 major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 major_names_lock pool_lock#2 irq_context: 0 major_names_lock major_names_spinlock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_highpri irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &____s->seqcount irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) krc.lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq (&pool->mayday_timer) &pool->lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &base->lock irq_context: softirq (&pool->mayday_timer) &base->lock &obj_hash[i].lock irq_context: softirq (&rtpcp->lazy_timer) irq_context: softirq (&rtpcp->lazy_timer) rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: softirq (&rtpcp->lazy_timer) rcu_tasks__percpu.cbs_pcpu_lock irq_context: softirq (&rtpcp->lazy_timer) rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: softirq (&rtpcp->lazy_timer) rcu_tasks__percpu.cbs_pcpu_lock &base->lock irq_context: softirq (&rtpcp->lazy_timer) rcu_tasks__percpu.cbs_pcpu_lock &base->lock &obj_hash[i].lock irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex (console_sem).lock irq_context: 0 &pgdat->kswapd_lock irq_context: softirq drivers/char/random.c:255 irq_context: softirq drivers/char/random.c:255 rcu_read_lock &pool->lock irq_context: softirq drivers/char/random.c:255 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/char/random.c:255 
rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/char/random.c:255 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/char/random.c:255 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (next_reseed).work irq_context: 0 (wq_completion)events_unbound (next_reseed).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work input_pool.lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work base_crng.lock irq_context: softirq mm/vmstat.c:2046 irq_context: softirq mm/vmstat.c:2046 rcu_read_lock &pool->lock irq_context: softirq mm/vmstat.c:2046 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/vmstat.c:2046 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/vmstat.c:2046 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/vmstat.c:2046 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock &obj_hash[i].lock irq_context: 0 &pool->lock &base->lock irq_context: 0 &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 slab_mutex batched_entropy_u8.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 slab_mutex kfence_freelist_lock irq_context: 0 console_lock fs_reclaim irq_context: 0 console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock &x->wait#8 irq_context: 0 console_lock &k->list_lock irq_context: 0 console_lock gdp_mutex irq_context: 0 console_lock gdp_mutex &k->list_lock irq_context: 0 console_lock gdp_mutex fs_reclaim irq_context: 0 console_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock gdp_mutex pool_lock#2 irq_context: 0 console_lock gdp_mutex lock irq_context: 0 console_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock lock irq_context: 0 console_lock lock kernfs_idr_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock bus_type_sem irq_context: 0 console_lock sysfs_symlink_target_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &dev->power.lock irq_context: 0 console_lock dpm_list_mtx irq_context: 0 console_lock uevent_sock_mutex irq_context: 0 console_lock running_helpers_waitq.lock irq_context: 0 console_lock subsys mutex#9 irq_context: 0 console_lock subsys mutex#9 &k->k_lock irq_context: 0 shrink_qlist.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &x->wait#9 irq_context: softirq &(&ssp->srcu_sup->work)->timer irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#9 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#9 &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock irq_context: 0 cpu_hotplug_lock flush_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (work_completion)(&sfw->work) irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (wq_completion)slub_flushwq irq_context: 0 cpu_hotplug_lock flush_lock &x->wait#10 irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)slub_flushwq irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) 
irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &c->lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &n->list_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &obj_hash[i].lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &meta->lock irq_context: 0 &n->list_lock &c->lock irq_context: 0 clk_debug_lock irq_context: 0 clocks_mutex irq_context: 0 acpi_scan_lock irq_context: 0 acpi_scan_lock semaphore->lock irq_context: 0 acpi_scan_lock fs_reclaim irq_context: 0 acpi_scan_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &c->lock irq_context: 0 acpi_scan_lock &____s->seqcount irq_context: 0 acpi_scan_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &x->wait#8 irq_context: 0 acpi_scan_lock acpi_device_lock irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_device_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock &xa->xa_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_device_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &k->list_lock irq_context: 0 acpi_scan_lock lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock bus_type_sem irq_context: 0 acpi_scan_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &k->k_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->power.lock irq_context: 0 acpi_scan_lock dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock subsys mutex#10 irq_context: 0 acpi_scan_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock &n->list_lock irq_context: 0 acpi_scan_lock &n->list_lock &c->lock irq_context: 0 acpi_scan_lock quarantine_lock irq_context: 0 acpi_scan_lock acpi_ioremap_lock irq_context: 0 acpi_scan_lock acpi_ioremap_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_ioremap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_ioremap_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_ioremap_lock free_vmap_area_lock irq_context: 0 acpi_scan_lock acpi_ioremap_lock free_vmap_area_lock &obj_hash[i].lock 
irq_context: 0 acpi_scan_lock acpi_ioremap_lock free_vmap_area_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_ioremap_lock &vn->busy.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex (worker)->lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex (worker)->lock &p->pi_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock devtree_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &x->wait#8 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &k->list_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock bus_type_sem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &k->k_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &____s->seqcount irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &dev->power.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dpm_list_mtx irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock subsys mutex#11 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_mtx irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_mtx fs_reclaim irq_context: 0 acpi_scan_lock cpu_add_remove_lock 
cpu_hotplug_lock dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_mtx pool_lock#2 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_mtx &dev->power.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_mtx pm_qos_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock (console_sem).lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 acpi_scan_lock cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &device->physical_node_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock irq_domain_mutex irq_context: 0 acpi_scan_lock &domain->mutex irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock &c->lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 
acpi_scan_lock &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &domain->mutex &irq_desc_lock_class irq_context: 0 acpi_scan_lock &domain->mutex fs_reclaim irq_context: 0 acpi_scan_lock &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &domain->mutex pool_lock#2 irq_context: 0 acpi_scan_lock resource_lock irq_context: 0 acpi_scan_lock &(&priv->bus_notifier)->rwsem irq_context: 0 acpi_scan_lock &(&priv->bus_notifier)->rwsem &device->physical_node_lock irq_context: 0 acpi_scan_lock fwnode_link_lock irq_context: 0 acpi_scan_lock fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &device->physical_node_lock irq_context: 0 acpi_scan_lock &dev->mutex device_links_srcu irq_context: 0 acpi_scan_lock &dev->mutex fwnode_link_lock irq_context: 0 acpi_scan_lock &dev->mutex device_links_lock irq_context: 0 acpi_scan_lock &dev->mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex &dev->devres_lock irq_context: 0 acpi_scan_lock &dev->mutex pinctrl_list_mutex irq_context: 0 acpi_scan_lock &dev->mutex devtree_lock irq_context: 0 acpi_scan_lock &dev->mutex pinctrl_maps_mutex irq_context: 0 acpi_scan_lock &dev->mutex pinctrl_list_mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex pinctrl_list_mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 acpi_scan_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex lock irq_context: 0 acpi_scan_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#8 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount 
irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 acpi_scan_lock &dev->mutex acpi_wakeup_lock irq_context: 0 acpi_scan_lock &dev->mutex &c->lock irq_context: 0 acpi_scan_lock &dev->mutex resource_lock irq_context: 0 acpi_scan_lock &dev->mutex free_vmap_area_lock irq_context: 0 acpi_scan_lock &dev->mutex &vn->busy.lock irq_context: 0 acpi_scan_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock &dev->mutex chrdevs_lock irq_context: 0 acpi_scan_lock &dev->mutex tty_mutex irq_context: 0 acpi_scan_lock &dev->mutex proc_subdir_lock irq_context: 0 acpi_scan_lock &dev->mutex proc_inum_ida.xa_lock irq_context: 0 acpi_scan_lock &dev->mutex proc_subdir_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex port_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &x->wait#8 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex bus_type_sem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->power.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex port_mutex uevent_sock_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex device_links_srcu irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex fwnode_link_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex device_links_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->devres_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex pinctrl_list_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex devtree_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex pinctrl_maps_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex pinctrl_list_mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex pinctrl_list_mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex deferred_probe_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex uevent_sock_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex subsys mutex#13 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &xa->xa_lock#3 irq_context: 0 
acpi_scan_lock &dev->mutex port_mutex &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &dev->mutex &rq->__lock irq_context: 0 (wq_completion)pm irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock &dev->power.wait_queue irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex (console_sem).lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &port_lock_key irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex &port_lock_key irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex syslog_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex (console_sem).lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex console_lock console_srcu console_owner console_owner_lock irq_context: softirq &(&group->avgs_work)->timer irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned 
long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/memcontrol.c:511 irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock &obj_hash[i].lock pool_lock irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/memcontrol.c:511 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&vmstat_work))) *)((&vmstat_work))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &obj_hash[i].lock 
irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_mutex &rq->__lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex batched_entropy_u8.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex batched_entropy_u8.lock crngs.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex kfence_freelist_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &x->wait#8 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->power.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex semaphore->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex uevent_sock_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &port_lock_key irq_context: 0 
acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &dev->power.lock hrtimer_bases.lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &dev->mutex &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex subsys mutex#14 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &n->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &n->list_lock &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex deferred_probe_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex device_links_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex lock irq_context: 0 
acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex bus_type_sem irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex req_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &p->pi_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &rq->__lock irq_context: 0 sb_writers irq_context: 0 sb_writers mount_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &xattrs->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#6 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait#11 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &x->wait#11 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex subsys mutex#15 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex chrdevs_lock irq_context: 0 acpi_scan_lock &dev->mutex fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 acpi_scan_lock &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex deferred_probe_mutex irq_context: 0 acpi_scan_lock &dev->mutex uevent_sock_mutex irq_context: 0 acpi_scan_lock &dev->mutex 
running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &dev->mutex probe_waitqueue.lock irq_context: 0 acpi_scan_lock subsys mutex#3 irq_context: 0 acpi_scan_lock &dev->mutex init_mm.page_table_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#11 &p->pi_lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 acpi_scan_lock &domain->mutex sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex acpi_pm_notifier_install_lock &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &device->physical_node_lock &c->lock irq_context: 0 acpi_scan_lock &dev->mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &c->lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &____s->seqcount irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock (console_sem).lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rsp->gp_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pcpu_lock irq_context: 0 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 acpi_scan_lock free_vmap_area_lock irq_context: 0 acpi_scan_lock &vn->busy.lock irq_context: 0 acpi_scan_lock init_mm.page_table_lock irq_context: 0 acpi_scan_lock io_range_mutex irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock gdp_mutex 
irq_context: 0 acpi_scan_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock gdp_mutex &c->lock irq_context: 0 acpi_scan_lock gdp_mutex &____s->seqcount irq_context: 0 acpi_scan_lock gdp_mutex lock irq_context: 0 acpi_scan_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock subsys mutex#16 irq_context: 0 acpi_scan_lock subsys mutex#16 &k->k_lock irq_context: 0 acpi_scan_lock acpi_hp_context_lock irq_context: 0 acpi_scan_lock acpi_hp_context_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_hp_context_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_hp_context_lock pool_lock#2 irq_context: 0 acpi_scan_lock bridge_mutex irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_lock irq_context: 0 acpi_scan_lock pci_acpi_companion_lookup_sem irq_context: 0 acpi_scan_lock pci_slot_mutex irq_context: 0 acpi_scan_lock resource_alignment_lock irq_context: 0 acpi_scan_lock device_links_srcu irq_context: 0 acpi_scan_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 acpi_scan_lock iort_msi_chip_lock irq_context: 0 acpi_scan_lock subsys mutex#17 irq_context: 0 acpi_scan_lock batched_entropy_u8.lock irq_context: 0 acpi_scan_lock kfence_freelist_lock irq_context: 0 acpi_scan_lock &meta->lock irq_context: 0 acpi_scan_lock devtree_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#8 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock 
acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock acpi_link_lock irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_link_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_link_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_link_lock (console_sem).lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock &c->lock irq_context: 0 acpi_scan_lock acpi_link_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_dep_list_lock irq_context: 0 acpi_scan_lock power_resource_list_lock irq_context: 0 acpi_device_lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 
klist_remove_lock irq_context: 0 &k->k_lock klist_remove_lock irq_context: 0 rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex (worker)->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kernfs_idr_lock irq_context: 0 kernfs_idr_lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock pool_lock#2 irq_context: 0 console_lock console_srcu console_owner_lock irq_context: 0 console_lock console_srcu console_owner irq_context: 0 console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_lock console_srcu console_owner console_owner_lock irq_context: 0 k-sk_lock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 k-slock-AF_NETLINK irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#11/1 irq_context: 0 &type->s_umount_key#11/1 fs_reclaim irq_context: 0 &type->s_umount_key#11/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#11/1 shrinker_mutex irq_context: 0 &type->s_umount_key#11/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#11/1 sb_lock irq_context: 0 &type->s_umount_key#11/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#11/1 &____s->seqcount irq_context: 0 &type->s_umount_key#11/1 &c->lock irq_context: 0 &type->s_umount_key#11/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#11/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 irq_context: 0 &type->s_umount_key#12/1 fs_reclaim irq_context: 0 &type->s_umount_key#12/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 pool_lock#2 irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#12/1 shrinker_mutex irq_context: 0 &type->s_umount_key#12/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#12/1 sb_lock irq_context: 0 &type->s_umount_key#12/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#12/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#12/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim 
irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start dma_fence_map irq_context: 0 &mm->mmap_lock irq_context: 0 key irq_context: 0 attribute_container_mutex irq_context: 0 triggers_list_lock irq_context: 0 leds_list_lock irq_context: 0 bus_type_sem irq_context: 0 (usb_notifier_list).rwsem irq_context: 0 &device->physical_node_lock irq_context: 0 rc_map_lock irq_context: 0 pci_lock irq_context: 0 subsys mutex#18 irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 &(&priv->bus_notifier)->rwsem iommu_probe_device_lock irq_context: 0 &(&priv->bus_notifier)->rwsem iommu_probe_device_lock iommu_device_lock irq_context: 0 (efi_runtime_lock).lock irq_context: 0 &x->wait#12 irq_context: 0 (wq_completion)efi_rts_wq irq_context: 0 (wq_completion)efi_rts_wq (work_completion)(&efi_rts_work.work) irq_context: 0 (wq_completion)efi_rts_wq (work_completion)(&efi_rts_work.work) cpu_asid_lock irq_context: 0 (wq_completion)efi_rts_wq (work_completion)(&efi_rts_work.work) efi_rt_lock irq_context: 0 (wq_completion)efi_rts_wq (work_completion)(&efi_rts_work.work) &x->wait#12 irq_context: 0 (wq_completion)efi_rts_wq (work_completion)(&efi_rts_work.work) &x->wait#12 &p->pi_lock irq_context: 0 (efivars_lock).lock irq_context: 0 devfreq_list_lock irq_context: 0 &entry->access irq_context: 0 info_mutex irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex fs_reclaim irq_context: 0 info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 info_mutex pool_lock#2 irq_context: 0 info_mutex proc_inum_ida.xa_lock irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 kobj_ns_type_lock irq_context: 0 page_pools_lock irq_context: 0 page_pools_lock fs_reclaim irq_context: 0 page_pools_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 page_pools_lock page_pools.xa_lock irq_context: 0 page_pools_lock page_pools.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#4 irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#4 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#8 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex 
kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#19 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#19 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex netdev_rename_lock.seqcount irq_context: 0 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock nl_table_lock irq_context: 0 rcu_read_lock nl_table_wait.lock irq_context: 0 rcu_read_lock quarantine_lock irq_context: 0 qdisc_mod_lock irq_context: 0 rtnl_mutex &c->lock irq_context: 0 rtnl_mutex &____s->seqcount irq_context: 0 bt_proto_lock irq_context: 0 hci_cb_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rate_ctrl_mutex irq_context: 0 rate_ctrl_mutex fs_reclaim irq_context: 0 rate_ctrl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rate_ctrl_mutex pool_lock#2 irq_context: 0 netlbl_domhsh_lock irq_context: 0 netlbl_unlhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock pool_lock#2 irq_context: 0 misc_mtx irq_context: 0 misc_mtx fs_reclaim irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx &____s->seqcount irq_context: 0 misc_mtx pool_lock#2 irq_context: 0 misc_mtx &x->wait#8 irq_context: 0 misc_mtx &obj_hash[i].lock irq_context: 0 misc_mtx &k->list_lock irq_context: 0 misc_mtx gdp_mutex irq_context: 0 misc_mtx gdp_mutex &k->list_lock irq_context: 0 misc_mtx gdp_mutex fs_reclaim irq_context: 0 misc_mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx gdp_mutex pool_lock#2 irq_context: 0 misc_mtx gdp_mutex lock irq_context: 0 misc_mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx lock irq_context: 0 misc_mtx lock kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 
misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx bus_type_sem irq_context: 0 misc_mtx sysfs_symlink_target_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 misc_mtx &dev->power.lock irq_context: 0 misc_mtx dpm_list_mtx irq_context: 0 misc_mtx req_lock irq_context: 0 misc_mtx &p->pi_lock irq_context: 0 misc_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx &x->wait#11 irq_context: 0 misc_mtx &rq->__lock irq_context: 0 misc_mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 misc_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx uevent_sock_mutex irq_context: 0 misc_mtx running_helpers_waitq.lock irq_context: 0 misc_mtx subsys mutex#20 irq_context: 0 misc_mtx subsys mutex#20 &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex irq_context: 0 input_mutex irq_context: 0 input_mutex input_devices_poll_wait.lock irq_context: 0 (netlink_chain).rwsem irq_context: 0 proto_tab_lock irq_context: 0 random_ready_notifier.lock irq_context: 0 random_ready_notifier.lock crngs.lock irq_context: 0 misc_mtx misc_minors_ida.xa_lock irq_context: 0 misc_mtx &c->lock irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 wtd_deferred_reg_mutex irq_context: 0 &rq->__lock &rt_rq->rt_runtime_lock irq_context: 0 &type->s_umount_key#13/1 irq_context: 0 &type->s_umount_key#13/1 fs_reclaim irq_context: 0 &type->s_umount_key#13/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 pool_lock#2 irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#13/1 shrinker_mutex irq_context: 0 &type->s_umount_key#13/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#13/1 sb_lock irq_context: 0 &type->s_umount_key#13/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#13/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#13/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &type->s_umount_key#13/1 &dentry->d_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &x->wait#7 irq_context: 0 clocksource_mutex (console_sem).lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner_lock irq_context: 0 clocksource_mutex 
console_lock console_srcu console_owner irq_context: 0 clocksource_mutex console_lock console_srcu console_owner &port_lock_key irq_context: hardirq tick_broadcast_lock irq_context: hardirq tick_broadcast_lock jiffies_lock irq_context: 0 (wq_completion)events timer_update_work irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &base->lock &base->lock/1 irq_context: 0 &base->lock &base->lock/1 &tmc->lock irq_context: 0 &base->lock &base->lock/1 &tmc->lock &group->lock irq_context: 0 &tmc->lock irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#14/1 irq_context: 0 &type->s_umount_key#14/1 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#14/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#14/1 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &retval->lock irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &c->lock irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->s_umount_key#14/1 shrinker_mutex irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->s_umount_key#14/1 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#14/1 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &type->s_umount_key#14/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#14/1 sb_lock irq_context: 0 &type->s_umount_key#14/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &type->s_umount_key#14/1 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(ptdesc)#2 irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sb->s_type->i_mutex_key#5 irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#13 
irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &sb->s_type->i_mutex_key#5 rename_lock.seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock ptlock_ptr(ptdesc)#2 irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock rcu_read_lock ptlock_ptr(ptdesc)#2 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#5 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#5 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex irq_context: 0 &sig->cred_guard_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 &c->lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 tracefs_inode_lock irq_context: 0 &sb->s_type->i_mutex_key#5 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#5 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex init_fs.lock irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#9 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 irq_context: 0 &type->s_umount_key#16/1 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#9 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &type->s_umount_key#16/1 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 shrinker_mutex irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 list_lrus_mutex irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 sb_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->s_umount_key#16/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#16/1 &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 
&____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#16/1 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &nvmeq->sq_lock irq_context: 0 &type->s_umount_key#16/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock ptlock_ptr(ptdesc)#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &type->s_umount_key#17/1 irq_context: 0 &type->s_umount_key#17/1 fs_reclaim irq_context: 0 &type->s_umount_key#17/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#17/1 shrinker_mutex irq_context: 0 &type->s_umount_key#17/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#17/1 sb_lock irq_context: 0 &type->s_umount_key#17/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#17/1 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &type->s_umount_key#17/1 &s->s_inode_list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex binfmt_lock irq_context: 0 &sig->cred_guard_mutex init_binfmt_misc.entries_lock irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#15 irq_context: 0 &type->s_umount_key#17/1 
&sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 &type->s_umount_key#17/1 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 irq_context: 0 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 irq_context: 0 &type->s_umount_key#18/1 fs_reclaim irq_context: 0 &type->s_umount_key#18/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#18/1 shrinker_mutex irq_context: 0 &type->s_umount_key#18/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#18/1 sb_lock irq_context: 0 &type->s_umount_key#18/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#18/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &type->s_umount_key#18/1 &dentry->d_lock irq_context: 0 bio_slab_lock slab_mutex &c->lock irq_context: 0 bio_slab_lock slab_mutex &____s->seqcount irq_context: 0 kclist_lock irq_context: 0 kclist_lock resource_lock irq_context: 0 kclist_lock fs_reclaim irq_context: 0 kclist_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kclist_lock pool_lock#2 irq_context: 0 &type->s_umount_key#19/1 irq_context: 0 &type->s_umount_key#19/1 fs_reclaim irq_context: 0 &type->s_umount_key#19/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 pool_lock#2 irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#19/1 shrinker_mutex irq_context: 0 &type->s_umount_key#19/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#19/1 sb_lock irq_context: 0 &type->s_umount_key#19/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#19/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#19/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#19/1 &____s->seqcount irq_context: 0 &type->s_umount_key#19/1 &c->lock irq_context: 0 &type->s_umount_key#19/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#19/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#19/1 &dentry->d_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &rt_b->rt_runtime_lock irq_context: hardirq &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rt_rq->rt_runtime_lock irq_context: 0 
&sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &____s->seqcount irq_context: 0 tomoyo_ss irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 tomoyo_ss (console_sem).lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner_lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner irq_context: 0 tomoyo_ss console_lock console_srcu console_owner &port_lock_key irq_context: 0 tomoyo_ss console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnp_lock irq_context: 0 pnp_lock fs_reclaim irq_context: 0 pnp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pnp_lock pool_lock#2 irq_context: 0 &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 &device->physical_node_lock fs_reclaim irq_context: 0 &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &device->physical_node_lock pool_lock#2 irq_context: 0 &device->physical_node_lock lock irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 fwnode_link_lock irq_context: 0 fwnode_link_lock &k->k_lock irq_context: 0 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex device_links_lock irq_context: 0 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex pinctrl_list_mutex irq_context: 0 &dev->mutex devtree_lock irq_context: 0 &dev->mutex pinctrl_maps_mutex irq_context: 0 &dev->mutex pinctrl_list_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex pinctrl_list_mutex pool_lock#2 irq_context: 0 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex lock irq_context: 0 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex pnp_lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex fwnode_link_lock &k->k_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex probe_waitqueue.lock irq_context: 0 subsys mutex#21 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 pool_lock#2 
irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &mapping->i_private_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#23 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->xattr_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_lock_key#23 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 subsys mutex#22 irq_context: 0 subsys mutex#22 &k->k_lock irq_context: 0 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#15 irq_context: 0 subsys mutex#15 &k->k_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 quarantine_lock irq_context: 0 subsys mutex#23 irq_context: 0 subsys mutex#23 &k->k_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class &____s->seqcount irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_callback pcpu_lock irq_context: softirq led_lock irq_context: 0 misc_mtx quarantine_lock irq_context: 0 subsys mutex#24 irq_context: 0 subsys mutex#24 &k->list_lock irq_context: 0 subsys mutex#24 &k->k_lock irq_context: 0 wq_pool_mutex &xa->xa_lock irq_context: 0 wq_pool_mutex wq_pool_attach_mutex irq_context: 0 wq_pool_mutex &pool->lock irq_context: 0 wq_pool_mutex &pool->lock &p->pi_lock irq_context: 0 wq_pool_mutex &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 netevent_notif_chain.lock irq_context: 0 devices_rwsem irq_context: 0 devices_rwsem clients_rwsem irq_context: 0 devices_rwsem clients_rwsem fs_reclaim irq_context: 0 devices_rwsem clients_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 devices_rwsem clients_rwsem clients.xa_lock irq_context: 0 devices_rwsem clients_rwsem clients.xa_lock pool_lock#2 irq_context: 0 (blocking_lsm_notifier_chain).rwsem irq_context: 0 (inetaddr_chain).rwsem irq_context: 0 inet6addr_chain.lock irq_context: 0 buses_mutex 
irq_context: 0 offload_lock irq_context: 0 inetsw_lock irq_context: 0 ptype_lock irq_context: 0 (wq_completion)events_power_efficient irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &vn->busy.lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &c->lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem slab_mutex irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem slab_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 bh_lock irq_context: 0 bh_lock &____s->seqcount irq_context: 0 bh_lock &c->lock irq_context: 0 bh_lock pool_lock#2 irq_context: 0 tcp_ulp_list_lock irq_context: 0 &hashinfo->lock irq_context: 0 k-slock-AF_INET/1 irq_context: 0 xfrm_state_afinfo_lock irq_context: 0 xfrm_policy_afinfo_lock irq_context: 0 xfrm_input_afinfo_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock irq_context: 0 
rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock &base->lock irq_context: 0 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-slock-AF_INET/1 irq_context: 0 tcp_cong_list_lock irq_context: 0 mptcp_sched_list_lock irq_context: softirq &group->lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) cache_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock &obj_hash[i].lock irq_context: 0 (rpc_pipefs_notifier_list).rwsem irq_context: 0 svc_xprt_class_lock irq_context: 0 xprt_list_lock irq_context: 0 xprt_list_lock (console_sem).lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner_lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner irq_context: 0 xprt_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 xprt_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 umhelper_sem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) fs_reclaim irq_context: 0 (wq_completion)async (work_completion)(&entry->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &c->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) rcu_read_lock init_fs.seq.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key &c->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 
(wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dentry->d_lock &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dentry->d_lock pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 mount_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss 
tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &c->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) 
sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &vn->busy.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_files.file_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->alloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &rq->__lock irq_context: 0 umh_sysctl_lock irq_context: 0 async_done.lock irq_context: 0 &drv->dynids.lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &dentry->d_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_done.lock &p->pi_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_done.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) async_done.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock &obj_hash[i].lock irq_context: 0 &dentry->d_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex irq_context: 0 &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &p->alloc_lock &fs->lock irq_context: 0 &child->perf_event_mutex irq_context: 0 css_set_lock irq_context: 0 &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 tasklist_lock irq_context: 0 tasklist_lock &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit irq_context: 0 tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pool_lock#2 irq_context: 0 tasklist_lock &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock 
cpuhp_state_mutex &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up batched_entropy_u8.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up kfence_freelist_lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock pool_lock irq_context: 0 &dentry->d_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dentry->d_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock pool_lock#2 irq_context: 0 subsys mutex#25 irq_context: 0 subsys mutex#26 irq_context: 0 subsys mutex#26 &k->list_lock irq_context: 0 subsys mutex#26 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 subsys mutex#27 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount irq_context: 0 &dentry->d_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &dentry->d_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pmus_lock fs_reclaim irq_context: 0 pmus_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pmus_lock &k->list_lock irq_context: 0 pmus_lock lock irq_context: 0 pmus_lock lock kernfs_idr_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pmus_lock uevent_sock_mutex irq_context: 0 pmus_lock rcu_read_lock &pool->lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pmus_lock running_helpers_waitq.lock irq_context: 0 pmus_lock &rq->__lock irq_context: 0 pmus_lock &x->wait#8 irq_context: 0 pmus_lock bus_type_sem irq_context: 0 pmus_lock sysfs_symlink_target_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 pmus_lock &k->k_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pmus_lock 
&root->kernfs_rwsem irq_context: 0 pmus_lock &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 pmus_lock &____s->seqcount irq_context: 0 pmus_lock &dev->power.lock irq_context: 0 pmus_lock dpm_list_mtx irq_context: 0 pmus_lock &dev->mutex &k->list_lock irq_context: 0 pmus_lock &dev->mutex &k->k_lock irq_context: 0 pmus_lock &dev->mutex &dev->power.lock irq_context: 0 pmus_lock subsys mutex#28 irq_context: 0 pmus_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pmus_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock irq_context: 0 &dentry->d_lock &obj_hash[i].lock pool_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 pidmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 key_user_lock irq_context: 0 key_serial_lock irq_context: 0 key_construction_mutex irq_context: 0 &type->lock_class irq_context: 0 &type->lock_class keyring_serialise_link_lock irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 keyring_serialise_link_lock irq_context: 0 &pgdat->kswapd_lock fs_reclaim irq_context: 0 &pgdat->kswapd_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pgdat->kswapd_lock pool_lock#2 irq_context: 0 &pgdat->kswapd_lock kthread_create_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock irq_context: 0 &pgdat->kswapd_lock &x->wait irq_context: 0 &pgdat->kswapd_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->kswapd_lock &obj_hash[i].lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_wait irq_context: 0 list_lrus_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &____s->seqcount irq_context: 0 drivers_lock irq_context: 0 misc_mtx &pcp->lock &zone->lock irq_context: 0 misc_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 misc_mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key &c->lock irq_context: 0 &type->s_umount_key#20/1 irq_context: 0 &type->s_umount_key#20/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 &type->s_umount_key#20/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 pool_lock#2 irq_context: 0 &type->s_umount_key#20/1 
pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#20/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#20/1 &c->lock irq_context: 0 &type->s_umount_key#20/1 &____s->seqcount irq_context: 0 &type->s_umount_key#20/1 shrinker_mutex irq_context: 0 &type->s_umount_key#20/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#20/1 sb_lock irq_context: 0 &type->s_umount_key#20/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#20/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#20/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 &dentry->d_lock irq_context: 0 &type->s_umount_key#20/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 irq_context: 0 &type->s_umount_key#21/1 fs_reclaim irq_context: 0 &type->s_umount_key#21/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 pool_lock#2 irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#21/1 shrinker_mutex irq_context: 0 &type->s_umount_key#21/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#21/1 sb_lock irq_context: 0 &type->s_umount_key#21/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#21/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#21/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 &dentry->d_lock irq_context: 0 configfs_subsystem_mutex irq_context: 0 &sb->s_type->i_mutex_key#6/1 irq_context: 0 &sb->s_type->i_mutex_key#6/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#6/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#6/1 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#6/1 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#6/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 &default_group_class[depth - 1]/2 irq_context: 0 ecryptfs_daemon_hash_mux irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ecryptfs_daemon_hash_mux pool_lock#2 irq_context: 0 ecryptfs_msg_ctx_lists_mux irq_context: 0 ecryptfs_msg_ctx_lists_mux &ecryptfs_msg_ctx_arr[i].mux irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ecryptfs_kthread_ctl.wait irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 nfs_version_lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem (console_sem).lock irq_context: 0 key_types_sem console_lock console_srcu console_owner_lock irq_context: 0 key_types_sem console_lock console_srcu console_owner irq_context: 0 key_types_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 key_types_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnfs_spinlock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 nls_lock irq_context: softirq &(&cache_cleaner)->timer irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback put_task_map-wait-type-override &obj_hash[i].lock irq_context: softirq rcu_callback put_task_map-wait-type-override pool_lock#2 irq_context: softirq (&tcp_orphan_timer) irq_context: softirq (&tcp_orphan_timer) &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) &base->lock irq_context: softirq (&tcp_orphan_timer) &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback put_task_map-wait-type-override &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &vn->busy.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &vn->lazy.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock pool_lock irq_context: 0 jffs2_compressor_list_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 next_tag_value_lock irq_context: 0 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 free_vmap_area_lock pool_lock#2 
irq_context: 0 log_redrive_lock irq_context: 0 &TxAnchor.LazyLock irq_context: 0 &TxAnchor.LazyLock jfs_commit_thread_wait.lock irq_context: 0 jfsTxnLock irq_context: 0 ocfs2_stack_lock irq_context: 0 ocfs2_stack_lock (console_sem).lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner_lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 o2hb_callback_sem irq_context: 0 o2net_handler_lock irq_context: 0 subsys mutex#29 irq_context: 0 subsys mutex#29 &k->k_lock irq_context: 0 slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#22/1 irq_context: 0 &type->s_umount_key#22/1 fs_reclaim irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#22/1 shrinker_mutex irq_context: 0 &type->s_umount_key#22/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#22/1 sb_lock irq_context: 0 &type->s_umount_key#22/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#22/1 &____s->seqcount irq_context: 0 &type->s_umount_key#22/1 &c->lock irq_context: 0 &type->s_umount_key#22/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#22/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#22/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim irq_context: 0 &type->s_umount_key#23/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 &rq->__lock irq_context: 0 &type->s_umount_key#23/1 shrinker_mutex irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#23/1 sb_lock irq_context: 0 &type->s_umount_key#23/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#23/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#23/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &c->lock irq_context: 0 &type->s_umount_key#23/1 &____s->seqcount irq_context: 0 &type->s_umount_key#23/1 &pcp->lock &zone->lock irq_context: 0 
&type->s_umount_key#23/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cipso_v4_doi_list_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex patch_lock irq_context: 0 bpf_crypto_types_sem irq_context: 0 bpf_crypto_types_sem fs_reclaim irq_context: 0 bpf_crypto_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_crypto_types_sem batched_entropy_u8.lock irq_context: 0 bpf_crypto_types_sem kfence_freelist_lock irq_context: 0 crypto_alg_sem irq_context: 0 alg_types_sem irq_context: 0 alg_types_sem fs_reclaim irq_context: 0 alg_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 alg_types_sem pool_lock#2 irq_context: 0 dma_list_mutex irq_context: 0 asymmetric_key_parsers_sem irq_context: 0 asymmetric_key_parsers_sem (console_sem).lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner_lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 blkcg_pol_register_mutex irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex cgroup_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock kernfs_idr_lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 elv_list_lock irq_context: 0 crc_t10dif_mutex irq_context: 0 crc_t10dif_mutex crypto_alg_sem irq_context: 0 crc_t10dif_mutex fs_reclaim irq_context: 0 crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc_t10dif_mutex pool_lock#2 irq_context: 0 crc64_rocksoft_mutex irq_context: 0 crc64_rocksoft_mutex crypto_alg_sem irq_context: 0 crc64_rocksoft_mutex fs_reclaim irq_context: 0 crc64_rocksoft_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc64_rocksoft_mutex pool_lock#2 irq_context: 0 ts_mod_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 tasklist_lock quarantine_lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#4 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex pool_lock#2 irq_context: 0 pci_ep_cfs_subsys.su_mutex irq_context: 0 &default_group_class[depth - 1]#2/1 irq_context: 0 &default_group_class[depth - 1]#2/1 fs_reclaim irq_context: 0 &default_group_class[depth - 1]#2/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &default_group_class[depth - 1]#2/1 pool_lock#2 irq_context: 0 &default_group_class[depth - 1]#2/1 &dentry->d_lock irq_context: 0 &default_group_class[depth - 1]#2/1 configfs_dirent_lock irq_context: 0 &default_group_class[depth - 1]#2/1 mmu_notifier_invalidate_range_start irq_context: 0 &default_group_class[depth - 1]#2/1 &s->s_inode_list_lock irq_context: 0 &default_group_class[depth - 1]#2/1 tk_core.seq.seqcount irq_context: 0 &default_group_class[depth - 1]#2/1 &sb->s_type->i_lock_key#19 irq_context: 0 &default_group_class[depth - 1]#2/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &default_group_class[depth - 1]#2/1 &sb->s_type->i_mutex_key#7/2 irq_context: softirq rcu_callback put_task_map-wait-type-override fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback put_task_map-wait-type-override fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock pool_lock#2 irq_context: 0 wq_pool_mutex &cfs_rq->removed.lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#6/1 &sb->s_type->i_mutex_key#7/2 irq_context: 0 pci_epf_mutex irq_context: 0 
(wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ipmi_interfaces_mutex irq_context: 0 ipmi_interfaces_mutex &k->list_lock irq_context: 0 ipmi_interfaces_mutex fs_reclaim irq_context: 0 ipmi_interfaces_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ipmi_interfaces_mutex pool_lock#2 irq_context: 0 ipmi_interfaces_mutex lock irq_context: 0 ipmi_interfaces_mutex lock kernfs_idr_lock irq_context: 0 ipmi_interfaces_mutex &root->kernfs_rwsem irq_context: 0 ipmi_interfaces_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 ipmi_interfaces_mutex &k->k_lock irq_context: 0 ipmi_interfaces_mutex &c->lock irq_context: 0 ipmi_interfaces_mutex &____s->seqcount irq_context: 0 ipmi_interfaces_mutex uevent_sock_mutex irq_context: 0 ipmi_interfaces_mutex &obj_hash[i].lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ipmi_interfaces_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ipmi_interfaces_mutex running_helpers_waitq.lock irq_context: 0 ipmi_interfaces_mutex pcpu_alloc_mutex irq_context: 0 ipmi_interfaces_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex pcpu_alloc_mutex irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex fs_reclaim irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex pool_lock#2 irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &wq->mutex irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &obj_hash[i].lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex kthread_create_lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &p->pi_lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &x->wait irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex &rq->__lock irq_context: 0 ipmi_interfaces_mutex wq_pool_mutex 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ipmi_interfaces_mutex &base->lock irq_context: 0 ipmi_interfaces_mutex &base->lock &obj_hash[i].lock irq_context: 0 ipmi_interfaces_mutex panic_notifier_list.lock irq_context: 0 smi_watchers_mutex irq_context: 0 smi_watchers_mutex &ipmi_interfaces_srcu irq_context: 0 smi_infos_lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &device->physical_node_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#8 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &c->lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 
&dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#12 &k->k_lock irq_context: 0 &dev->mutex acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 &dev->mutex acpi_wakeup_lock irq_context: 0 &dev->mutex semaphore->lock irq_context: 0 &dev->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex irq_domain_mutex irq_context: 0 &dev->mutex &domain->mutex irq_context: 0 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex kthread_create_lock irq_context: 0 &dev->mutex &p->pi_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &x->wait irq_context: 0 &dev->mutex &rq->__lock irq_context: 0 &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &desc->request_mutex irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_controller_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &dev->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &desc->wait_for_threads irq_context: 0 &desc->wait_for_threads irq_context: 0 &desc->wait_for_threads &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 &dev->mutex register_lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: 0 &dev->mutex register_lock fs_reclaim irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock 
&obj_hash[i].lock irq_context: 0 &dev->mutex register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_lock pool_lock#2 irq_context: 0 &p->pi_lock &rq->__lock &rt_rq->rt_runtime_lock irq_context: 0 &dev->mutex register_lock proc_inum_ida.xa_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock &c->lock irq_context: 0 &dev->mutex register_lock &____s->seqcount irq_context: 0 &dev->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex &x->wait#8 irq_context: 0 &dev->mutex gdp_mutex irq_context: 0 &dev->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex bus_type_sem irq_context: 0 &dev->mutex &c->lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#30 irq_context: 0 &dev->mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock irq_context: softirq rcu_callback &obj_hash[i].lock pool_lock irq_context: softirq rcu_callback put_task_map-wait-type-override quarantine_lock irq_context: softirq rcu_callback &base->lock irq_context: softirq rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock irq_context: 0 &dev->mutex input_mutex irq_context: 0 &dev->mutex input_mutex fs_reclaim irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex input_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex input_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &dev->mutex input_mutex kfence_freelist_lock irq_context: 0 &dev->mutex input_mutex &dev->mutex#2 irq_context: 0 &dev->mutex input_mutex input_devices_poll_wait.lock irq_context: 0 &dev->mutex wakeup_ida.xa_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#12 irq_context: 0 &dev->mutex subsys mutex#12 &k->k_lock irq_context: 0 &dev->mutex events_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex input_mutex pool_lock#2 irq_context: 0 register_count_mutex irq_context: 0 register_count_mutex &k->list_lock irq_context: 0 register_count_mutex fs_reclaim irq_context: 0 register_count_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_count_mutex pool_lock#2 irq_context: 0 register_count_mutex lock irq_context: 0 
register_count_mutex lock kernfs_idr_lock irq_context: 0 register_count_mutex &root->kernfs_rwsem irq_context: 0 register_count_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_count_mutex &k->k_lock irq_context: 0 register_count_mutex uevent_sock_mutex irq_context: 0 register_count_mutex &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 register_count_mutex running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up semaphore->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up thermal_cdev_ida.xa_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up cpufreq_driver_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#8 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up bus_type_sem irq_context: 0 cpu_hotplug_lock cpuhp_state-up sysfs_symlink_target_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up dpm_list_mtx irq_context: 0 cpu_hotplug_lock cpuhp_state-up uevent_sock_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#31 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#31 &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up thermal_list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up *(&acpi_gbl_reference_count_lock) irq_context: 0 wq_pool_mutex pool_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &k->list_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) lock kernfs_idr_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)async (work_completion)(&entry->work) &k->k_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) uevent_sock_mutex irq_context: 0 (wq_completion)async (work_completion)(&entry->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)async (work_completion)(&entry->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)async (work_completion)(&entry->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)async (work_completion)(&entry->work) running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock irq_context: 0 scmi_requested_devices_mtx irq_context: 0 scmi_requested_devices_mtx fs_reclaim irq_context: 0 scmi_requested_devices_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 scmi_requested_devices_mtx pool_lock#2 irq_context: 0 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &port_lock_key irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock irq_context: 0 &dev->mutex iommu_probe_device_lock irq_context: softirq lib/debugobjects.c:102 irq_context: softirq lib/debugobjects.c:102 rcu_read_lock &pool->lock irq_context: softirq lib/debugobjects.c:102 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq lib/debugobjects.c:102 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq lib/debugobjects.c:102 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq lib/debugobjects.c:102 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex acpi_link_lock irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_link_lock 
pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock semaphore->lock irq_context: 0 &dev->mutex acpi_link_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &c->lock irq_context: 0 &dev->mutex acpi_link_lock (console_sem).lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex &drv->dynids.lock irq_context: 0 &dev->mutex cpu_add_remove_lock irq_context: 0 &dev->mutex pci_lock irq_context: 0 (wq_completion)events (debug_obj_work).work irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex virtio_index_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_node_0 irq_context: 0 &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#32 irq_context: 0 vdpa_dev_lock irq_context: 0 gdp_mutex &c->lock irq_context: 0 gdp_mutex &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &wq irq_context: 0 subsys mutex#33 irq_context: 0 subsys mutex#33 &k->k_lock irq_context: 0 scmi_requested_devices_mtx &____s->seqcount irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sched_map-wait-type-override &rq->__lock irq_context: 0 sched_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 sched_map-wait-type-override &obj_hash[i].lock irq_context: 0 sched_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &simple_offset_lock_class quarantine_lock irq_context: 0 &x->wait#11 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &meta->lock 
irq_context: 0 sb_writers &type->i_mutex_dir_key/1 kfence_freelist_lock irq_context: softirq rcu_callback quarantine_lock irq_context: 0 pool_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock irq_context: 0 port_mutex irq_context: 0 port_mutex fs_reclaim irq_context: 0 port_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 port_mutex pool_lock#2 irq_context: 0 port_mutex &x->wait#8 irq_context: 0 port_mutex &obj_hash[i].lock irq_context: 0 port_mutex &k->list_lock irq_context: 0 port_mutex lock irq_context: 0 port_mutex lock kernfs_idr_lock irq_context: 0 port_mutex &root->kernfs_rwsem irq_context: 0 port_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 port_mutex bus_type_sem irq_context: 0 port_mutex sysfs_symlink_target_lock irq_context: 0 port_mutex &k->k_lock irq_context: 0 port_mutex &root->kernfs_rwsem irq_context: 0 port_mutex &dev->power.lock irq_context: 0 port_mutex dpm_list_mtx irq_context: 0 port_mutex uevent_sock_mutex irq_context: 0 port_mutex rcu_read_lock &pool->lock irq_context: 0 port_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 port_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 port_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 port_mutex running_helpers_waitq.lock irq_context: 0 port_mutex &dev->mutex &dev->power.lock irq_context: 0 port_mutex &dev->mutex &k->list_lock irq_context: 0 port_mutex &dev->mutex &k->k_lock irq_context: 0 port_mutex &dev->mutex device_links_srcu irq_context: 0 port_mutex &dev->mutex fwnode_link_lock irq_context: 0 port_mutex &dev->mutex device_links_lock irq_context: 0 port_mutex &dev->mutex fs_reclaim irq_context: 0 port_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 port_mutex &dev->mutex pool_lock#2 irq_context: 0 port_mutex &dev->mutex &dev->devres_lock irq_context: 0 port_mutex &dev->mutex pinctrl_list_mutex irq_context: 0 port_mutex &dev->mutex devtree_lock irq_context: 0 port_mutex &dev->mutex pinctrl_maps_mutex irq_context: 0 port_mutex &dev->mutex pinctrl_list_mutex &obj_hash[i].lock irq_context: 0 port_mutex &dev->mutex pinctrl_list_mutex pool_lock#2 irq_context: 0 port_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 port_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 port_mutex &dev->mutex lock irq_context: 0 port_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 port_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 port_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 port_mutex &dev->mutex deferred_probe_mutex irq_context: 0 port_mutex &dev->mutex uevent_sock_mutex irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 port_mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 port_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 port_mutex subsys mutex#13 irq_context: 0 port_mutex &xa->xa_lock#3 irq_context: 0 port_mutex &c->lock irq_context: 0 port_mutex &____s->seqcount irq_context: 0 port_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 port_mutex &root->kernfs_rwsem 
&cfs_rq->removed.lock irq_context: 0 port_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 port_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 port_mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 port_mutex &dev->mutex &c->lock irq_context: 0 port_mutex &dev->mutex &____s->seqcount irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &port->mutex irq_context: 0 port_mutex &port->mutex fs_reclaim irq_context: 0 port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 port_mutex &port->mutex pool_lock#2 irq_context: 0 port_mutex &port->mutex console_mutex irq_context: 0 port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 port_mutex &port->mutex &x->wait#8 irq_context: 0 port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 port_mutex &port->mutex &obj_hash[i].lock pool_lock irq_context: 0 port_mutex &port->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 port_mutex &port->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 port_mutex &port->mutex &dev->power.lock irq_context: 0 port_mutex &port->mutex &k->list_lock irq_context: 0 port_mutex &port->mutex lock irq_context: 0 port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 port_mutex &port->mutex bus_type_sem irq_context: 0 port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 port_mutex &port->mutex &k->k_lock irq_context: 0 port_mutex &port->mutex dpm_list_mtx irq_context: 0 port_mutex &port->mutex uevent_sock_mutex irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 port_mutex &port->mutex &dev->mutex &port_lock_key irq_context: 0 port_mutex &port->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 port_mutex &port->mutex &dev->mutex &dev->power.lock hrtimer_bases.lock irq_context: 0 port_mutex &port->mutex &dev->mutex &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 port_mutex &port->mutex subsys mutex#14 irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 port_mutex &port->mutex dev_pm_qos_sysfs_mtx 
irq_context: 0 port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 port_mutex &port->mutex kernfs_idr_lock irq_context: 0 port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 port_mutex &port->mutex deferred_probe_mutex irq_context: 0 port_mutex &port->mutex device_links_lock irq_context: 0 port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 port_mutex &port->mutex &____s->seqcount irq_context: 0 port_mutex &port->mutex gdp_mutex irq_context: 0 port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 port_mutex &port->mutex gdp_mutex lock irq_context: 0 port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 port_mutex &port->mutex &c->lock irq_context: 0 port_mutex &port->mutex lock kernfs_idr_lock &c->lock irq_context: 0 port_mutex &port->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 port_mutex &port->mutex req_lock irq_context: 0 port_mutex &port->mutex &p->pi_lock irq_context: 0 port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &port->mutex &x->wait#11 irq_context: 0 port_mutex &port->mutex &rq->__lock irq_context: 0 port_mutex &port->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &port->mutex subsys mutex#15 irq_context: 0 port_mutex &port->mutex subsys mutex#15 &k->k_lock irq_context: 0 port_mutex &port->mutex chrdevs_lock irq_context: 0 port_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 port_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 port_mutex &port->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 port_mutex &port->mutex &cfs_rq->removed.lock irq_context: 0 port_mutex &obj_hash[i].lock pool_lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 port_mutex &port->mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 port_mutex &port->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 port_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 port_mutex &port->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &dev->mutex rng_index_ida.xa_lock irq_context: 0 &dev->mutex &md->mutex irq_context: 0 &dev->mutex &md->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex free_vmap_area_lock irq_context: 0 &dev->mutex &vn->busy.lock irq_context: 0 &dev->mutex &md->mutex pci_lock irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#6 irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#6 pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock &its->lock irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &its->dev_alloc_lock lpi_range_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex tmpmask_lock irq_context: 0 &dev->mutex &md->mutex &its->lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 
&dev->mutex &desc->request_mutex &irq_desc_lock_class &gic_data_rdist()->rd_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock tmpmask_lock irq_context: 0 &dev->mutex &zone->lock irq_context: 0 &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->vqs_list_lock irq_context: 0 &dev->mutex &vp_dev->lock irq_context: 0 &dev->mutex rng_mutex irq_context: 0 &dev->mutex rng_mutex &x->wait#13 irq_context: 0 &dev->mutex rng_mutex fs_reclaim irq_context: 0 &dev->mutex rng_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rng_mutex pool_lock#2 irq_context: 0 &dev->mutex rng_mutex kthread_create_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &rq->__lock irq_context: hardirq &x->wait#14 irq_context: 0 &dev->mutex rng_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rng_mutex &x->wait irq_context: 0 &dev->mutex &dev->config_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rng_mutex irq_context: 0 (wq_completion)events (debug_obj_work).work pool_lock#2 irq_context: 0 reading_mutex irq_context: 0 wq_pool_mutex batched_entropy_u8.lock irq_context: 0 wq_pool_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 wq_pool_mutex kfence_freelist_lock irq_context: 0 misc_mtx &cfs_rq->removed.lock irq_context: softirq drivers/char/random.c:1026 irq_context: softirq drivers/char/random.c:1026 input_pool.lock irq_context: 0 &dev->devres_lock irq_context: 0 &dev->managed.lock irq_context: 0 &type->s_umount_key#24/1 irq_context: 0 &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#24/1 &c->lock irq_context: 0 &type->s_umount_key#24/1 &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 shrinker_mutex irq_context: 0 &type->s_umount_key#24/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#24/1 sb_lock irq_context: 0 &type->s_umount_key#24/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#24/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#24/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->s_umount_key#24/1 &dentry->d_lock irq_context: 0 lock drm_minor_lock irq_context: 0 lock drm_minor_lock pool_lock#2 irq_context: 0 stack_depot_init_mutex irq_context: 0 subsys mutex#34 irq_context: 0 subsys mutex#34 &k->k_lock irq_context: 0 drm_minor_lock irq_context: 0 &dev->mode_config.idr_mutex irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mode_config.idr_mutex pool_lock#2 irq_context: 0 crtc_ww_class_acquire irq_context: 0 
crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &dev->mode_config.blob_lock irq_context: 0 &xa->xa_lock#7 irq_context: 0 &xa->xa_lock#8 irq_context: 0 &dev->mode_config.connector_list_lock irq_context: 0 &dev->vbl_lock irq_context: 0 drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 drm_connector_list_iter fs_reclaim irq_context: 0 drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &x->wait#8 irq_context: 0 drm_connector_list_iter &connector->mutex &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->list_lock irq_context: 0 drm_connector_list_iter &connector->mutex lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex bus_type_sem irq_context: 0 drm_connector_list_iter &connector->mutex sysfs_symlink_target_lock irq_context: 0 drm_connector_list_iter &connector->mutex &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &dev->power.lock irq_context: 0 drm_connector_list_iter &connector->mutex dpm_list_mtx irq_context: 0 drm_connector_list_iter &connector->mutex uevent_sock_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 drm_connector_list_iter &connector->mutex running_helpers_waitq.lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#34 irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#34 &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex pin_fs_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex 
&sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &dev->mode_config.idr_mutex irq_context: 0 drm_connector_list_iter &connector->mutex &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex connector_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->filelist_mutex irq_context: 0 &dev->clientlist_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock 
&client->modeset_mutex &dev->mode_config.mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &sbinfo->stat_lock irq_context: 0 &dev->clientlist_mutex &helper->lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &s->s_inode_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock batched_entropy_u32.lock irq_context: 0 &dev->clientlist_mutex &helper->lock batched_entropy_u32.lock crngs.lock irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.idr_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.fb_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file->fbs_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &prime_fpriv->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &xa->xa_lock#9 irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &sb->s_type->i_lock_key irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &info->lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex lock#4 irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &xa->xa_lock#9 pool_lock#2 
irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex lock#4 &lruvec->lru_lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &rq->__lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &xa->xa_lock#9 &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &xa->xa_lock#9 &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex free_vmap_area_lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex &vn->busy.lock irq_context: 0 &dev->clientlist_mutex &helper->lock reservation_ww_class_mutex init_mm.page_table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#8 irq_context: 0 &dev->clientlist_mutex registration_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock req_lock irq_context: 0 &dev->clientlist_mutex registration_lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#11 irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock uevent_sock_mutex irq_context: 0 
&dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex registration_lock &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#9 irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#9 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock (console_sem).lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &fb_info->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock vt_event_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &x->wait#8 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock console_lock uevent_sock_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock 
&pool->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: